diff --git a/.copywrite.hcl b/.copywrite.hcl deleted file mode 100644 index ab9fa58f3411e..0000000000000 --- a/.copywrite.hcl +++ /dev/null @@ -1,16 +0,0 @@ -schema_version = 1 - -project { - license = "MPL-2.0" - copyright_year = 2015 - - # (OPTIONAL) A list of globs that should not have copyright/license headers. - # Supports doublestar glob patterns for more flexibility in defining which - # files or folders should be ignored - header_ignore = [ - "builtin/credential/aws/pkcs7/**", - "ui/node_modules/**", - "enos/modules/k8s_deploy_vault/raft-config.hcl", - "plugins/database/postgresql/scram/**" - ] -} diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 23958d8352807..9399e9b5dba9c 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - contact_links: - name: Ask a question url: https://discuss.hashicorp.com/c/vault diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml index 335ce6d5aab00..e300244895fbf 100644 --- a/.github/actionlint.yaml +++ b/.github/actionlint.yaml @@ -7,4 +7,3 @@ self-hosted-runner: - small - large - ondemand - diff --git a/.github/configs/milestone-check.json b/.github/configs/milestone-check.json deleted file mode 100644 index a06049b15398f..0000000000000 --- a/.github/configs/milestone-check.json +++ /dev/null @@ -1,8 +0,0 @@ -[ - { - "type": "check-milestone", - "title": "Milestone Check", - "success": "Milestone set", - "failure": "Milestone not set" - } - ] \ No newline at end of file diff --git a/.github/enos-run-matrices/ent.json b/.github/enos-run-matrices/ent.json new file mode 100644 index 0000000000000..d1dafc0fba595 --- /dev/null +++ b/.github/enos-run-matrices/ent.json @@ -0,0 +1,24 @@ +{ + "include": [ + { + "scenario": "smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms arch:amd64 builder:crt edition:ent", + "aws_region": "us-west-1" + }, + { + 
"scenario": "smoke backend:raft consul_version:1.12.3 distro:ubuntu seal:shamir arch:amd64 builder:crt edition:ent", + "aws_region": "us-west-2" + }, + { + "scenario": "upgrade backend:raft consul_version:1.11.7 distro:rhel seal:shamir arch:amd64 builder:crt edition:ent", + "aws_region": "us-west-1" + }, + { + "scenario": "upgrade backend:consul consul_version:1.11.7 distro:rhel seal:awskms arch:amd64 builder:crt edition:ent", + "aws_region": "us-west-2" + }, + { + "scenario": "autopilot distro:ubuntu seal:shamir arch:amd64 builder:crt edition:ent", + "aws_region": "us-west-1" + } + ] +} diff --git a/.github/enos-run-matrices/oss.json b/.github/enos-run-matrices/oss.json new file mode 100644 index 0000000000000..7ce25ebc49a22 --- /dev/null +++ b/.github/enos-run-matrices/oss.json @@ -0,0 +1,20 @@ +{ + "include": [ + { + "scenario": "smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms arch:amd64 builder:crt edition:oss", + "aws_region": "us-west-1" + }, + { + "scenario": "smoke backend:raft consul_version:1.12.3 distro:ubuntu seal:shamir arch:amd64 builder:crt edition:oss", + "aws_region": "us-west-2" + }, + { + "scenario": "upgrade backend:raft consul_version:1.11.7 distro:rhel seal:shamir arch:amd64 builder:crt edition:oss", + "aws_region": "us-west-1" + }, + { + "scenario": "upgrade backend:consul consul_version:1.11.7 distro:rhel seal:awskms arch:amd64 builder:crt edition:oss", + "aws_region": "us-west-2" + } + ] +} diff --git a/.github/scripts/generate-test-package-lists.sh b/.github/scripts/generate-test-package-lists.sh index 408e1780adb02..2f430a297656d 100755 --- a/.github/scripts/generate-test-package-lists.sh +++ b/.github/scripts/generate-test-package-lists.sh @@ -8,8 +8,6 @@ # solution. It distributes the entire set of test packages into 16 sublists, # which should roughly take an equal amount of time to complete. 
-set -e - test_packages=() base="github.com/hashicorp/vault" @@ -20,14 +18,14 @@ test_packages[1]+=" $base/command" test_packages[1]+=" $base/sdk/helper/keysutil" # Total time: 1160 -test_packages[2]+=" $base/sdk/helper/ocsp" +test_packages[2]+=" $base/builtin/credential/app-id" if [ "${ENTERPRISE:+x}" == "x" ] ; then test_packages[2]+=" $base/vault/external_tests/replication-perf" fi # Total time: 1009 test_packages[3]+=" $base/builtin/credential/approle" -test_packages[3]+=" $base/command/agentproxyshared/sink/file" +test_packages[3]+=" $base/command/agent/sink/file" test_packages[3]+=" $base/command/agent/template" test_packages[3]+=" $base/helper/random" test_packages[3]+=" $base/helper/storagepacker" @@ -45,8 +43,6 @@ fi test_packages[4]+=" $base/http" test_packages[4]+=" $base/sdk/helper/pluginutil" test_packages[4]+=" $base/serviceregistration/kubernetes" -test_packages[4]+=" $base/tools/godoctests/pkg/analyzer" -test_packages[4]+=" $base/tools/gonilnilfunctions/pkg/analyzer" if [ "${ENTERPRISE:+x}" == "x" ] ; then test_packages[4]+=" $base/vault/external_tests/apilock" test_packages[4]+=" $base/vault/external_tests/filteredpaths" @@ -88,19 +84,15 @@ test_packages[6]+=" $base/builtin/audit/file" test_packages[6]+=" $base/builtin/credential/github" test_packages[6]+=" $base/builtin/credential/okta" test_packages[6]+=" $base/builtin/logical/database/dbplugin" -test_packages[6]+=" $base/command/agentproxyshared/auth/cert" -test_packages[6]+=" $base/command/agentproxyshared/auth/jwt" -test_packages[6]+=" $base/command/agentproxyshared/auth/kerberos" -test_packages[6]+=" $base/command/agentproxyshared/auth/kubernetes" -test_packages[6]+=" $base/command/agentproxyshared/auth/token-file" -test_packages[6]+=" $base/command/agentproxyshared" -test_packages[6]+=" $base/command/agentproxyshared/cache" -test_packages[6]+=" $base/command/agentproxyshared/cache/cacheboltdb" -test_packages[6]+=" $base/command/agentproxyshared/cache/cachememdb" -test_packages[6]+=" 
$base/command/agentproxyshared/cache/keymanager" +test_packages[6]+=" $base/command/agent/auth/cert" +test_packages[6]+=" $base/command/agent/auth/jwt" +test_packages[6]+=" $base/command/agent/auth/kerberos" +test_packages[6]+=" $base/command/agent/auth/kubernetes" +test_packages[6]+=" $base/command/agent/cache" +test_packages[6]+=" $base/command/agent/cache/cacheboltdb" +test_packages[6]+=" $base/command/agent/cache/cachememdb" +test_packages[6]+=" $base/command/agent/cache/keymanager" test_packages[6]+=" $base/command/agent/config" -test_packages[6]+=" $base/command/agent/exec" -test_packages[6]+=" $base/command/proxy/config" test_packages[6]+=" $base/command/config" test_packages[6]+=" $base/command/token" if [ "${ENTERPRISE:+x}" == "x" ] ; then @@ -114,7 +106,6 @@ test_packages[6]+=" $base/helper/fairshare" test_packages[6]+=" $base/helper/flag-kv" test_packages[6]+=" $base/helper/flag-slice" test_packages[6]+=" $base/helper/forwarding" -test_packages[6]+=" $base/helper/logging" test_packages[6]+=" $base/helper/metricsutil" test_packages[6]+=" $base/helper/namespace" test_packages[6]+=" $base/helper/osutil" @@ -122,7 +113,6 @@ test_packages[6]+=" $base/helper/parseip" test_packages[6]+=" $base/helper/policies" test_packages[6]+=" $base/helper/testhelpers/logical" test_packages[6]+=" $base/helper/timeutil" -test_packages[6]+=" $base/helper/useragent" test_packages[6]+=" $base/helper/versions" test_packages[6]+=" $base/internalshared/configutil" test_packages[6]+=" $base/internalshared/listenerutil" @@ -147,20 +137,17 @@ test_packages[6]+=" $base/sdk/helper/kdf" test_packages[6]+=" $base/sdk/helper/locksutil" test_packages[6]+=" $base/sdk/helper/pathmanager" test_packages[6]+=" $base/sdk/helper/roottoken" -test_packages[6]+=" $base/sdk/helper/testhelpers/schema" test_packages[6]+=" $base/sdk/helper/xor" test_packages[6]+=" $base/sdk/physical/file" test_packages[6]+=" $base/sdk/plugin/pb" test_packages[6]+=" $base/serviceregistration/kubernetes/client" 
test_packages[6]+=" $base/shamir" test_packages[6]+=" $base/vault/cluster" -test_packages[6]+=" $base/vault/eventbus" test_packages[6]+=" $base/vault/external_tests/api" if [ "${ENTERPRISE:+x}" == "x" ] ; then test_packages[6]+=" $base/vault/external_tests/consistencyheaders" fi test_packages[6]+=" $base/vault/external_tests/expiration" -test_packages[6]+=" $base/vault/external_tests/hcp_link" test_packages[6]+=" $base/vault/external_tests/kv" if [ "${ENTERPRISE:+x}" == "x" ] ; then test_packages[6]+=" $base/vault/external_tests/plugins" @@ -203,7 +190,7 @@ test_packages[7]+=" $base/vault/quotas" # Total time: 779 test_packages[8]+=" $base/builtin/credential/aws/pkcs7" test_packages[8]+=" $base/builtin/logical/totp" -test_packages[8]+=" $base/command/agentproxyshared/auth" +test_packages[8]+=" $base/command/agent/auth" test_packages[8]+=" $base/physical/raft" test_packages[8]+=" $base/sdk/framework" test_packages[8]+=" $base/sdk/plugin" @@ -223,8 +210,8 @@ if [ "${ENTERPRISE:+x}" == "x" ] ; then fi # Total time: 310 -test_packages[9]+=" $base/vault/hcp_link/capabilities/api_capability" test_packages[9]+=" $base/vault/external_tests/plugin" +test_packages[9]+=" $base/builtin/logical/cassandra" # Total time: 925 test_packages[10]+=" $base/builtin/credential/ldap" @@ -247,12 +234,10 @@ test_packages[12]+=" $base/plugins/database/mssql" test_packages[12]+=" $base/plugins/database/mysql" # Total time: 704 -test_packages[13]+=" $base/builtin/logical/pkiext" test_packages[13]+=" $base/command/server" test_packages[13]+=" $base/physical/aerospike" test_packages[13]+=" $base/physical/cockroachdb" test_packages[13]+=" $base/plugins/database/postgresql" -test_packages[13]+=" $base/plugins/database/postgresql/scram" if [ "${ENTERPRISE:+x}" == "x" ] ; then test_packages[13]+=" $base/vault/external_tests/filteredpathsext" fi @@ -261,6 +246,10 @@ test_packages[13]+=" $base/vault/external_tests/policy" # Total time: 374 test_packages[14]+=" $base/builtin/credential/radius" 
test_packages[14]+=" $base/builtin/logical/ssh" +test_packages[14]+=" $base/builtin/logical/mongodb" +test_packages[14]+=" $base/builtin/logical/mssql" +test_packages[14]+=" $base/builtin/logical/mysql" +test_packages[14]+=" $base/builtin/logical/postgresql" if [ "${ENTERPRISE:+x}" == "x" ] ; then test_packages[14]+=" $base/enthelpers/wal" fi @@ -287,10 +276,4 @@ if [ "${ENTERPRISE:+x}" == "x" ] ; then test_packages[16]+=" $base/vault/external_tests/autosnapshots" test_packages[16]+=" $base/vault/external_tests/replicationext" test_packages[16]+=" $base/vault/external_tests/sealext" -fi - -for i in $(cd $(git rev-parse --show-toplevel) && go list -test -json ./... | - jq -r '.ForTest | select(.!=null) | select(.|test("_binary$"))'); -do - test_packages[17]+=" $i" -done +fi \ No newline at end of file diff --git a/.github/scripts/test-generate-test-package-lists.sh b/.github/scripts/test-generate-test-package-lists.sh index c3d1cb60670b4..bc8d31cb3bb31 100755 --- a/.github/scripts/test-generate-test-package-lists.sh +++ b/.github/scripts/test-generate-test-package-lists.sh @@ -72,4 +72,4 @@ if (( ${#all_packages[@]} != $test_package_count )) ; then done echo "Packages in test_packages that aren't used: ${unused_packages// /}" -fi +fi \ No newline at end of file diff --git a/.github/scripts/verify_changes.sh b/.github/scripts/verify_changes.sh index 78a41b14b3186..81f3b688eb5bd 100755 --- a/.github/scripts/verify_changes.sh +++ b/.github/scripts/verify_changes.sh @@ -3,13 +3,15 @@ event_type=$1 # GH event type (pull_request) ref_name=$2 # branch reference that triggered the workflow -base_ref=$3 # PR branch base ref +head_ref=$3 # PR branch head ref +base_ref=$4 # PR branch base ref changed_dir="" if [[ "$event_type" == "pull_request" ]]; then + git fetch --no-tags --prune origin $head_ref git fetch --no-tags --prune origin $base_ref - head_commit="HEAD" + head_commit="origin/$head_ref" base_commit="origin/$base_ref" else git fetch --no-tags --prune origin $ref_name 
diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index 817de2d37b937..abe7e7237b810 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -10,6 +10,6 @@ jobs: actionlint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - name: "Check workflow files" uses: docker://docker.mirror.hashicorp.services/rhysd/actionlint@sha256:93834930f56ca380be3e9a3377670d7aa5921be251b9c774891a39b3629b83b8 diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index f78464a8c8b9e..ebcbada20e0a1 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -11,11 +11,11 @@ jobs: backport-targeted-release-branch: if: github.event.pull_request.merged runs-on: ubuntu-latest - container: hashicorpdev/backport-assistant:0.3.3 + container: hashicorpdev/backport-assistant:0.2.5 steps: - name: Backport changes to targeted release branch run: | - backport-assistant backport -merge-method=squash -gh-automerge + backport-assistant backport env: BACKPORT_LABEL_REGEXP: "backport/(?P\\d+\\.\\d+\\.[+\\w]+)" BACKPORT_TARGET_TEMPLATE: "release/{{.target}}" diff --git a/.github/workflows/build-vault-oss.yml b/.github/workflows/build-vault-oss.yml index 60480d53ae23e..6b6f17f09dd94 100644 --- a/.github/workflows/build-vault-oss.yml +++ b/.github/workflows/build-vault-oss.yml @@ -40,14 +40,14 @@ jobs: runs-on: ubuntu-latest name: Vault ${{ inputs.goos }} ${{ inputs.goarch }} v${{ inputs.vault-version }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 with: go-version: ${{ inputs.go-version }} - name: Set up node and yarn - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + uses: 
actions/setup-node@v3 with: - node-version-file: './ui/package.json' + node-version: 14 cache: yarn cache-dependency-path: ui/yarn.lock - name: Build UI @@ -58,6 +58,8 @@ jobs: GOARCH: ${{ inputs.goarch }} GOOS: ${{ inputs.goos }} GO_TAGS: ${{ inputs.go-tags }} + # We started stripping symbol tables in 1.13.x + KEEP_SYMBOLS: true run: make ci-build - name: Determine artifact basename env: @@ -68,7 +70,7 @@ jobs: env: BUNDLE_PATH: out/${{ env.ARTIFACT_BASENAME }}.zip run: make ci-bundle - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@v3 with: name: ${{ env.ARTIFACT_BASENAME }}.zip path: out/${{ env.ARTIFACT_BASENAME }}.zip @@ -96,13 +98,13 @@ jobs: echo "RPM_PACKAGE=$(basename out/*.rpm)" >> "$GITHUB_ENV" echo "DEB_PACKAGE=$(basename out/*.deb)" >> "$GITHUB_ENV" - if: ${{ inputs.create-packages }} - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: ${{ env.RPM_PACKAGE }} path: out/${{ env.RPM_PACKAGE }} if-no-files-found: error - if: ${{ inputs.create-packages }} - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: ${{ env.DEB_PACKAGE }} path: out/${{ env.DEB_PACKAGE }} diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ccf4a35194882..cc2da2af06090 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -4,11 +4,6 @@ name: build on: workflow_dispatch: pull_request: - # The default types for pull_request are [ opened, synchronize, reopened ]. - # This is insufficient for our needs, since we're skipping stuff on PRs in - # draft mode. By adding the ready_for_review type, when a draft pr is marked - # ready, we run everything, including the stuff we'd have skipped up until now. 
- types: [ opened, synchronize, reopened, ready_for_review ] push: branches: - main @@ -17,22 +12,19 @@ on: jobs: # verify-changes determines if the changes are only for docs (website) verify-changes: - if: github.event.pull_request.draft == false runs-on: ubuntu-latest outputs: is_docs_change: ${{ steps.get-changeddir.outputs.is_docs_change }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - with: - ref: ${{ github.event.pull_request.head.sha }} - fetch-depth: 0 # Use fetch depth 0 for comparing changes to base branch + - uses: actions/checkout@v3 - name: Get changed directories id: get-changeddir env: TYPE: ${{ github.event_name }} REF_NAME: ${{ github.ref_name }} + HEAD_REF: ${{ github.head_ref }} BASE: ${{ github.base_ref }} - run: ./.github/scripts/verify_changes.sh ${{ env.TYPE }} ${{ env.REF_NAME }} ${{ env.BASE }} + run: ./.github/scripts/verify_changes.sh ${{ env.TYPE }} ${{ env.REF_NAME }} ${{ env.HEAD_REF }} ${{ env.BASE }} product-metadata: # do not run build and test steps for docs changes @@ -51,7 +43,7 @@ jobs: vault-version: ${{ steps.get-metadata.outputs.vault-version }} vault-base-version: ${{ steps.get-metadata.outputs.vault-base-version }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v3 - name: Get metadata id: get-metadata env: @@ -73,7 +65,7 @@ jobs: with: version: ${{ steps.get-metadata.outputs.vault-version }} product: ${{ steps.get-metadata.outputs.package-name }} - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@v3 with: name: metadata.json path: ${{ steps.generate-metadata-file.outputs.filepath }} @@ -152,7 +144,7 @@ jobs: matrix: arch: [arm, arm64, 386, amd64] steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v3 - uses: hashicorp/actions-docker-build@v1 with: version: ${{ 
needs.product-metadata.outputs.vault-version }} @@ -173,7 +165,7 @@ jobs: matrix: arch: [amd64] steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 - uses: hashicorp/actions-docker-build@v1 with: version: ${{ needs.product-metadata.outputs.vault-version }} @@ -184,15 +176,6 @@ jobs: test: name: Test ${{ matrix.build-artifact-name }} - # Only run the Enos workflow against branches that are created from the - # hashicorp/vault repository. This has the effect of limiting execution of - # Enos scenarios to branches that originate from authors that have write - # access to hashicorp/vault repository. This is required as Github Actions - # will not populate the required secrets for branches created by outside - # contributors in order to protect the secrets integrity. - # This condition can be removed in future if enos workflow is updated to - # workflow_run event - if: "! github.event.pull_request.head.repo.fork" needs: - product-metadata - build-linux @@ -216,16 +199,6 @@ jobs: test-docker-k8s: name: Test Docker K8s - # Only run the Enos workflow against branches that are created from the - # hashicorp/vault repository. This has the effect of limiting execution of - # Enos scenarios to branches that originate from authors that have write - # access to hashicorp/vault repository. This is required as Github Actions - # will not populate the required secrets for branches created by outside - # contributors in order to protect the secrets integrity. - # GHA secrets are only ready on workflow_run for public repo - # This condition can be removed in future if enos workflow is updated to - # workflow_run event - if: "! 
github.event.pull_request.head.repo.fork" needs: - product-metadata - build-docker diff --git a/.github/workflows/changelog-checker.yml b/.github/workflows/changelog-checker.yml index 155cc8eff1e7e..3811a767fb18a 100644 --- a/.github/workflows/changelog-checker.yml +++ b/.github/workflows/changelog-checker.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # by default the checkout action doesn't checkout all branches diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3658f62927470..2f857653503f9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,11 +1,6 @@ name: CI on: pull_request: - # The default types for pull_request are [ opened, synchronize, reopened ]. - # This is insufficient for our needs, since we're skipping stuff on PRs in - # draft mode. By adding the ready_for_review type, when a draft pr is marked - # ready, we run everything, including the stuff we'd have skipped up until now. 
- types: [ opened, synchronize, reopened, ready_for_review ] push: branches: - main @@ -54,7 +49,7 @@ jobs: container: image: returntocorp/semgrep@sha256:ffc6f3567654f9431456d49fd059dfe548f007c494a7eb6cd5a1a3e50d813fb3 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - name: Run Semgrep Rules id: semgrep run: semgrep ci --include '*.go' --config 'tools/semgrep/ci' @@ -72,8 +67,8 @@ jobs: - setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-tiny) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + - uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 with: go-version-file: ./.go-version cache: true @@ -92,7 +87,7 @@ jobs: if: ${{ needs.setup.outputs.enterprise != '' && github.base_ref != '' }} runs-on: ${{ fromJSON(needs.setup.outputs.compute-tiny) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c with: fetch-depth: 0 - id: determine-branch @@ -130,10 +125,7 @@ jobs: !startsWith(github.head_ref, 'backport/docs/') uses: ./.github/workflows/test-go.yml with: - # The regular Go tests use an extra runner to execute the - # binary-dependent tests. We isolate them there so that the - # other tests aren't slowed down waiting for a binary build. 
- total-runners: 17 + total-runners: 16 go-arch: amd64 go-build-tags: '${{ needs.setup.outputs.go-build-tags }},deadlock' runs-on: ${{ needs.setup.outputs.compute-larger }} @@ -146,7 +138,6 @@ jobs: - setup-go-cache # Don't run this job for PR branches starting with 'ui/', 'backport/ui/', 'docs/', or 'backport/docs/' if: | - github.event.pull_request.draft == false && !startsWith(github.head_ref, 'ui/') && !startsWith(github.head_ref, 'backport/ui/') && !startsWith(github.head_ref, 'docs/') && @@ -163,13 +154,11 @@ jobs: go-build-tags: ${{ needs.setup.outputs.go-build-tags }} runs-on: ${{ needs.setup.outputs.compute-huge }} enterprise: ${{ needs.setup.outputs.enterprise }} - name: "-race" secrets: inherit test-go-fips: name: Run Go tests with FIPS configuration # Only run this job for the enterprise repo if the PR branch doesn't start with 'ui/', 'backport/ui/', 'docs/', or 'backport/docs/' if: | - github.event.pull_request.draft == false && needs.setup.outputs.enterprise == 1 && !startsWith(github.head_ref, 'ui/') && !startsWith(github.head_ref, 'backport/ui/') && @@ -189,21 +178,18 @@ jobs: go-build-tags: '${{ needs.setup.outputs.go-build-tags }},deadlock,cgo,fips,fips_140_2' runs-on: ${{ needs.setup.outputs.compute-larger }} enterprise: ${{ needs.setup.outputs.enterprise }} - name: "-fips" secrets: inherit test-ui: name: Test UI # The test-ui job is only run on: # - pushes to main and branches starting with "release/" # - PRs where the branch starts with "ui/", "backport/ui/", "merge", or when base branch starts with "release/" - # - PRs with the "ui" label on github if: | github.ref_name == 'main' || startsWith(github.ref_name, 'release/') || startsWith(github.head_ref, 'ui/') || startsWith(github.head_ref, 'backport/ui/') || - startsWith(github.head_ref, 'merge') || - contains(github.event.pull_request.labels.*.name, 'ui') + startsWith(github.head_ref, 'merge') needs: - setup permissions: @@ -211,26 +197,26 @@ jobs: contents: read runs-on: ${{ 
fromJSON(needs.setup.outputs.compute-larger) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + - uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 with: go-version-file: ./.go-version cache: true # Setup node.js without caching to allow running npm install -g yarn (next step) - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c with: - node-version-file: './ui/package.json' + node-version: 14 - id: install-yarn run: | npm install -g yarn # Setup node.js with caching using the yarn.lock file - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c with: - node-version-file: './ui/package.json' + node-version: 14 cache: yarn cache-dependency-path: ui/yarn.lock - id: install-browser - uses: browser-actions/setup-chrome@c485fa3bab6be59dce18dbc18ef6ab7cbc8ff5f1 # v1.2.0 + uses: browser-actions/setup-chrome@29abc1a83d1d71557708563b4bc962d0f983a376 - id: ui-dependencies name: ui-dependencies working-directory: ./ui @@ -281,12 +267,12 @@ jobs: cd ui mkdir -p test-results/qunit yarn test:oss - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce with: name: test-results-ui path: ui/test-results if: always() - - uses: test-summary/action@62bc5c68de2a6a0d02039763b8c754569df99e3f # TSCCR: no entry for repository "test-summary/action" + - uses: test-summary/action@62bc5c68de2a6a0d02039763b8c754569df99e3f with: paths: "ui/test-results/qunit/results.xml" show: "fail" @@ -300,4 +286,4 @@ jobs: runs-on: ${{ fromJSON(needs.setup.outputs.compute-tiny) }} steps: - run: | - tr -d '\n' <<< '${{ 
toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)' + tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)' diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000000000..af1f723925a96 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,50 @@ +name: "Code scanning - scheduled (weekly) or on-demand" + +on: + schedule: + - cron: '0 15 * * 0' + workflow_dispatch: + +jobs: + CodeQL-Build: + + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + # We must fetch at least the immediate parents so that if this is + # a pull request then we can checkout the head. + fetch-depth: 2 + + # If this run was triggered by a pull request event, then checkout + # the head of the pull request instead of the merge commit. + - run: git checkout HEAD^2 + if: ${{ github.event_name == 'pull_request' }} + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + # Override language selection by uncommenting this and choosing your languages + with: + languages: go, javascript + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + # ℹī¸ Command-line programs to run using the OS shell. 
+ # 📚 https://git.io/JvXDl + + # ✏ī¸ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/drepecated-functions-checker.yml b/.github/workflows/drepecated-functions-checker.yml deleted file mode 100644 index 853681b34f820..0000000000000 --- a/.github/workflows/drepecated-functions-checker.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: "Check Deprecations" - -on: - pull_request: - # Runs on PRs to main - branches: - - main - -jobs: - deprecations-check: - runs-on: ubuntu-latest - timeout-minutes: 30 - steps: - - name: Checkout code - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - with: - fetch-depth: 0 # by default the checkout action doesn't checkout all branches - - name: Setup Go - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 - with: - go-version-file: ./.go-version - cache: true - - name: Install required tools - run: | - make bootstrap - - name: Check deprecations for files in diff - run: | - # Need to run this from repository root and not from scripts/ as staticcheck works - # only on packages - ./scripts/deprecations-checker.sh ${{ github.event.pull_request.base.ref }} ${{ github.event.repository.name }} - \ No newline at end of file diff --git a/.github/workflows/enos-fmt.yml b/.github/workflows/enos-fmt.yml index d3d5ade0d6309..298b2dc185f1e 100644 --- a/.github/workflows/enos-fmt.yml +++ b/.github/workflows/enos-fmt.yml @@ -15,7 +15,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v3 - uses: hashicorp/setup-terraform@v2 with: terraform_wrapper: false diff --git 
a/.github/workflows/enos-release-testing-oss.yml b/.github/workflows/enos-release-testing-oss.yml index a39bab7aaacfc..7cddbc56ed32c 100644 --- a/.github/workflows/enos-release-testing-oss.yml +++ b/.github/workflows/enos-release-testing-oss.yml @@ -15,7 +15,7 @@ jobs: vault-revision: ${{ steps.get-metadata.outputs.vault-revision }} vault-version: ${{ steps.get-metadata.outputs.vault-version }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v3 with: # Check out the repository at the same Git SHA that was used to create # the artifacts to get the correct metadata. @@ -64,7 +64,6 @@ jobs: save-metadata: runs-on: linux - if: always() needs: test steps: - name: Persist metadata diff --git a/.github/workflows/enos-run-k8s.yml b/.github/workflows/enos-run-k8s.yml index 6af4d0393d8a9..e5200d025151b 100644 --- a/.github/workflows/enos-run-k8s.yml +++ b/.github/workflows/enos-run-k8s.yml @@ -31,7 +31,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@v3 - name: Set up Terraform uses: hashicorp/setup-terraform@v2 with: @@ -44,7 +44,7 @@ jobs: github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - name: Download Docker Image id: download - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@e9ef242655d12993efdcda9058dee2db83a2cb9b with: name: ${{ inputs.artifact-name }} path: ./enos/support/downloads diff --git a/.github/workflows/godoc-test-checker.yml b/.github/workflows/godoc-test-checker.yml deleted file mode 100644 index e56ebda42f1a4..0000000000000 --- a/.github/workflows/godoc-test-checker.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Check Go Docs for tests - -on: - pull_request: - types: [opened, synchronize] - # Runs on PRs to main - branches: - - main - -jobs: - godoc-test-check: - runs-on: ubuntu-latest - steps: 
- - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - with: - fetch-depth: 0 - - name: Set Up Go - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 - with: - cache: true - go-version-file: ./.go-version - - name: Verify new tests have go docs - run: make ci-vet-godoctests diff --git a/.github/workflows/milestone-checker.yml b/.github/workflows/milestone-checker.yml deleted file mode 100644 index b23ce6081dd44..0000000000000 --- a/.github/workflows/milestone-checker.yml +++ /dev/null @@ -1,35 +0,0 @@ -# This workflow checks that there is either a 'pr/no-milestone' label applied to a PR -# or there is a milestone associated with a PR - -name: Check Milestone - -on: - pull_request: - types: [opened, synchronize, labeled, unlabeled] - # Runs on PRs to main and release branches - branches: - - main - - release/** - issues: - types: [milestoned, demilestoned] - -jobs: - # checks that a milestone entry is present for a PR - milestone-check: - # If there is a `pr/no-milestone` label we ignore this check - if: "! 
( contains(github.event.pull_request.labels.*.name, 'pr/no-milestone') || ( github.event.name == 'labeled' && github.event.label == 'pr/no-milestone' ) )" - runs-on: ubuntu-latest - steps: - - name: Checkout Actions - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - with: - repository: "grafana/grafana-github-actions" - path: ./actions - ref: main - - name: Install Actions - run: npm install --production --prefix ./actions - - name: Run PR Checks - uses: ./actions/pr-checks - with: - token: ${{secrets.GITHUB_TOKEN}} - configPath: configs/milestone-check diff --git a/.github/workflows/nil-nil-function-checker.yml b/.github/workflows/nil-nil-function-checker.yml deleted file mode 100644 index 04c3e1ba43ddb..0000000000000 --- a/.github/workflows/nil-nil-function-checker.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Check Functions For nil, nil returns - -on: - pull_request: - types: [opened, synchronize] - # Runs on PRs to main - branches: - - main - -jobs: - # Note: if there is a function we want to ignore this check for, - # You can add 'ignore-nil-nil-function-check' somewhere in the - # godoc for the function. 
- nil-nil-function-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - with: - fetch-depth: 0 - - name: Set Up Go - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 - with: - cache: true - go-version-file: ./.go-version - - name: Verify functions don't return nil, nil - run: make ci-vet-gonilnilfunctions diff --git a/.github/workflows/oss.yml b/.github/workflows/oss.yml index dd6f3392f9f1f..d49550ff5a05f 100644 --- a/.github/workflows/oss.yml +++ b/.github/workflows/oss.yml @@ -19,9 +19,9 @@ jobs: runs-on: ubuntu-latest steps: - if: github.event.pull_request != null - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@v3 - if: github.event.pull_request != null - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1 + uses: dorny/paths-filter@v2 id: changes with: # derived from CODEOWNERS @@ -68,7 +68,7 @@ jobs: - if: github.event.pull_request != null && steps.changes.outputs.ui == 'true' run: echo "PROJECT=171" >> "$GITHUB_ENV" - - uses: actions/add-to-project@v0.3.0 # TSCCR: no entry for repository "actions/add-to-project" + - uses: actions/add-to-project@v0.3.0 with: project-url: https://github.com/orgs/hashicorp/projects/${{ env.PROJECT }} github-token: ${{ secrets.TRIAGE_GITHUB_TOKEN }} diff --git a/.github/workflows/remove-labels.yml b/.github/workflows/remove-labels.yml deleted file mode 100644 index 014b6752af7a4..0000000000000 --- a/.github/workflows/remove-labels.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Autoremove Labels - -on: - issues: - types: [closed] - pull_request_target: - types: [closed] - -jobs: - - RemoveWaitingLabelFromClosedIssueOrPR: - if: github.event.action == 'closed' - runs-on: ubuntu-latest - steps: - - name: Remove triaging labels from closed issues and PRs - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1.3.0 - with: - labels: | - 
waiting-for-response \ No newline at end of file diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml deleted file mode 100644 index 3d58acbb7a9f5..0000000000000 --- a/.github/workflows/security-scan.yml +++ /dev/null @@ -1,82 +0,0 @@ -name: Security Scan - -on: - push: - branches: [main] - pull_request: - branches: - - 'main' - - '!oss-merge-main*' - -jobs: - scan: - runs-on: ['linux', 'large'] - if: ${{ github.actor != 'dependabot[bot]' || github.actor != 'hc-github-team-secure-vault-core' }} - steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - - name: Set up Go - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 - with: - go-version: 1.18 - - - name: Set up Python - uses: actions/setup-python@57ded4d7d5e986d7296eab16560982c6dd7c923b # v4.6.0 - with: - python-version: 3.x - - - name: Clone Security Scanner repo - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - with: - repository: hashicorp/security-scanner - token: ${{ secrets.HASHIBOT_PRODSEC_GITHUB_TOKEN }} - path: security-scanner - ref: 5a491479f4131d343afe0a4f18f6fcd36639f3fa - - - name: Install dependencies - shell: bash - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - mkdir "$HOME/.bin" - cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-semgrep" - go build -o scan-plugin-semgrep . - mv scan-plugin-semgrep "$HOME/.bin" - - cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-codeql" - go build -o scan-plugin-codeql . 
- mv scan-plugin-codeql "$HOME/.bin" - - # Semgrep - python3 -m pip install semgrep - - # CodeQL - LATEST=$(gh release list --repo https://github.com/github/codeql-action | cut -f 3 | sort --version-sort | tail -n1) - gh release download --repo https://github.com/github/codeql-action --pattern codeql-bundle-linux64.tar.gz "$LATEST" - tar xf codeql-bundle-linux64.tar.gz -C "$HOME/.bin" - - # Add to PATH - echo "$HOME/.bin" >> "$GITHUB_PATH" - echo "$HOME/.bin/codeql" >> "$GITHUB_PATH" - - - name: Scan - id: scan - uses: ./security-scanner - # env: - # Note: this _should_ work, but causes some issues with Semgrep. - # Instead, rely on filtering in the SARIF Output step. - #SEMGREP_BASELINE_REF: ${{ github.base_ref }} - with: - repository: "$PWD" - - - name: SARIF Output - shell: bash - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - cat results.sarif - - - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@9a866ed4524fc3422c3af1e446dab8efa3503411 # codeql-bundle-20230418 - with: - sarif_file: results.sarif diff --git a/.github/workflows/setup-go-cache.yml b/.github/workflows/setup-go-cache.yml index 6d8096c5dc1e9..3b8040a20545c 100644 --- a/.github/workflows/setup-go-cache.yml +++ b/.github/workflows/setup-go-cache.yml @@ -8,10 +8,10 @@ jobs: setup-go-cache: runs-on: ${{ fromJSON(inputs.runs-on) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c #v3.3.0 as of 2023-01-18 - id: setup-go name: Setup go - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 + uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 #v3.4.0 as of 2022-12-07 with: go-version-file: ./.go-version cache: true diff --git a/.github/workflows/stable-website.yaml b/.github/workflows/stable-website.yaml index 1447151d73deb..fdd6da27f9d6a 100644 --- a/.github/workflows/stable-website.yaml +++ b/.github/workflows/stable-website.yaml @@ -10,7 +10,7 
@@ jobs: name: Cherry pick to stable-website branch steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@v2 with: ref: stable-website - run: | diff --git a/.github/workflows/test-acc-dockeronly-nightly.yml b/.github/workflows/test-acc-dockeronly-nightly.yml deleted file mode 100644 index 4a78bb62f49a2..0000000000000 --- a/.github/workflows/test-acc-dockeronly-nightly.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: test-go-acceptance-nightly - -on: - # Change to nightly cadence once API-credential-requiring tests are added to the jobs - workflow_dispatch: - -# Currently the jobs here are only for acceptance tests that have no dependencies except for Docker -jobs: - plugins-database: - uses: ./.github/workflows/test-run-acc-tests-for-path.yml - strategy: - matrix: - name: [mongodb, mysql, postgresql] - with: - name: plugins-database-${{ matrix.name }} - path: plugins/database/${{ matrix.name }} - - external: - uses: ./.github/workflows/test-run-acc-tests-for-path.yml - strategy: - matrix: - name: [api, identity, token] - with: - name: external-${{ matrix.name }} - path: vault/external_tests/${{ matrix.name }} - - # Suggestions and tips for adding more acceptance test jobs: - # - the job name is up to you, but it should be derived from the path that the tests are found in - # - for instance, "plugins-database" is a job for acceptance tests in the plugins/database path - # - the path will be used with go test wildcards, but don't include the preceding "./" or following "/..." 
- # - the name parameter is used to construct the log artifact's name, make it something that is related to the path diff --git a/.github/workflows/test-ci-bootstrap.yml b/.github/workflows/test-ci-bootstrap.yml deleted file mode 100644 index a0efa8ddfdd0d..0000000000000 --- a/.github/workflows/test-ci-bootstrap.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: test-ci-bootstrap - -on: - workflow_dispatch: - pull_request: - branches: - - main - paths: - - enos/ci/** - - .github/workflows/test-ci-bootstrap.yml - push: - branches: - - main - paths: - - enos/ci/** - - .github/workflows/test-ci-bootstrap.yml - -jobs: - bootstrap-ci: - runs-on: ubuntu-latest - env: - TF_WORKSPACE: "${{ github.event.repository.name }}-ci-enos-bootstrap" - TF_VAR_repository: ${{ github.event.repository.name }} - TF_VAR_aws_ssh_public_key: ${{ secrets.SSH_KEY_PUBLIC_CI }} - TF_TOKEN_app_terraform_io: ${{ secrets.TF_API_TOKEN }} - steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - name: Set up Terraform - uses: hashicorp/setup-terraform@v2 - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@e1e17a757e536f70e52b5a12b2e8d1d1c60e04ef # v2.0.0 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} - aws-region: us-east-1 - role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} - role-skip-session-tagging: true - role-duration-seconds: 3600 - - name: Init Terraform - id: tf_init - run: | - terraform -chdir=enos/ci/bootstrap init - - name: Plan Terraform - id: tf_plan - run: | - terraform -chdir=enos/ci/bootstrap plan - - name: Apply Terraform - if: ${{ github.ref == 'refs/heads/main' }} - id: tf_apply - run: | - terraform -chdir=enos/ci/bootstrap apply -auto-approve diff --git a/.github/workflows/test-ci-cleanup.yml b/.github/workflows/test-ci-cleanup.yml deleted file mode 100644 index 0e3c90dd3b5fd..0000000000000 --- a/.github/workflows/test-ci-cleanup.yml +++ /dev/null 
@@ -1,88 +0,0 @@ -name: test-ci-cleanup -on: - schedule: - # * is a special character in YAML so you have to quote this string - - cron: '05 02 * * *' - -jobs: - setup: - runs-on: ubuntu-latest - outputs: - regions: ${{steps.setup.outputs.regions}} - steps: - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@e1e17a757e536f70e52b5a12b2e8d1d1c60e04ef # v2.0.0 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} - aws-region: us-east-1 - role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} - role-skip-session-tagging: true - role-duration-seconds: 3600 - - name: Get all regions - id: setup - run: | - echo "regions=$(aws ec2 describe-regions --region us-east-1 --output json --query 'Regions[].RegionName' | tr -d '\n ')" >> "$GITHUB_OUTPUT" - - aws-nuke: - needs: setup - runs-on: ubuntu-latest - container: - image: rebuy/aws-nuke - options: - --user root - -t - env: - AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }} - TIME_LIMIT: "72h" - timeout-minutes: 60 - steps: - - name: Configure AWS credentials - id: aws-configure - uses: aws-actions/configure-aws-credentials@e1e17a757e536f70e52b5a12b2e8d1d1c60e04ef # v2.0.0 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} - aws-region: us-east-1 - role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} - role-skip-session-tagging: true - role-duration-seconds: 3600 - mask-aws-account-id: false - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - name: Configure - run: | - cp enos/ci/aws-nuke.yml . 
- sed -i "s/ACCOUNT_NUM/${{ steps.aws-configure.outputs.aws-account-id }}/g" aws-nuke.yml - sed -i "s/TIME_LIMIT/${TIME_LIMIT}/g" aws-nuke.yml - # We don't care if cleanup succeeds or fails, because dependencies be dependenceies, - # we'll fail on actually actionable things in the quota steep afterwards. - - name: Clean up abandoned resources - # Filter STDERR because it's super noisy about things we don't have access to - run: | - aws-nuke -c aws-nuke.yml -q --no-dry-run --force 2>/tmp/aws-nuke-error.log || true - - check-quotas: - needs: [ setup, aws-nuke ] - runs-on: ubuntu-latest - container: - image: jantman/awslimitchecker - env: - AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID_CI }} - AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY_CI }} - strategy: - matrix: - region: ${{ fromJSON(needs.setup.outputs.regions) }} - steps: - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@e1e17a757e536f70e52b5a12b2e8d1d1c60e04ef # v2.0.0 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} - aws-region: us-east-1 - role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} - role-skip-session-tagging: true - role-duration-seconds: 3600 - # Currently just checking VPC limits across all region, can add more checks here in future - - name: Check AWS Quotas - run: awslimitchecker -S "VPC" -r ${{matrix.region}} diff --git a/.github/workflows/test-enos-scenario-ui.yml b/.github/workflows/test-enos-scenario-ui.yml index 53ae5f9c9f37b..9fa25bd0f3b28 100644 --- a/.github/workflows/test-enos-scenario-ui.yml +++ b/.github/workflows/test-enos-scenario-ui.yml @@ -32,14 +32,18 @@ jobs: name: Get metadata runs-on: ubuntu-latest outputs: + go-version: ${{ steps.get-metadata.outputs.go-version }} + node-version: ${{ steps.get-metadata.outputs.node-version }} runs-on: ${{ steps.get-metadata.outputs.runs-on }} vault_edition: ${{ steps.get-metadata.outputs.vault_edition }} steps: - - uses: 
actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v3 - id: get-metadata env: IS_ENT: ${{ startsWith(github.event.repository.name, 'vault-enterprise' ) }} run: | + echo "go-version=$(cat ./.go-version)" >> "$GITHUB_OUTPUT" + echo "node-version=$(cat ./ui/.nvmrc)" >> "$GITHUB_OUTPUT" if [ "$IS_ENT" == true ]; then echo "detected vault_edition=ent" echo "runs-on=['self-hosted', 'ondemand', 'os=linux', 'type=m5d.4xlarge']" >> "$GITHUB_OUTPUT" @@ -67,20 +71,20 @@ jobs: GOPRIVATE: github.com/hashicorp steps: - name: Checkout - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@v3 - name: Set Up Go - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 + uses: actions/setup-go@v3 with: - go-version-file: ./.go-version + go-version: ${{ needs.get-metadata.outputs.go-version }} - uses: hashicorp/action-setup-enos@v1 with: github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - name: Set Up Git run: git config --global url."https://${{ secrets.elevated_github_token }}:@github.com".insteadOf "https://github.com" - name: Set Up Node - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + uses: actions/setup-node@v3 with: - node-version-file: './ui/package.json' + node-version: ${{ needs.get-metadata.outputs.node-version }} - name: Set Up Terraform uses: hashicorp/setup-terraform@v2 with: @@ -104,12 +108,12 @@ jobs: sudo apt install -y libnss3-dev libgdk-pixbuf2.0-dev libgtk-3-dev libxss-dev libasound2 - name: Install Chrome if: steps.chrome-check.outputs.chrome-version == 'not-installed' - uses: browser-actions/setup-chrome@c485fa3bab6be59dce18dbc18ef6ab7cbc8ff5f1 # v1.2.0 + uses: browser-actions/setup-chrome@v1 - name: Installed Chrome Version run: | echo "Installed Chrome Version = [$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null)]" - name: Configure AWS credentials from Test 
account - uses: aws-actions/configure-aws-credentials@e1e17a757e536f70e52b5a12b2e8d1d1c60e04ef # v2.0.0 + uses: aws-actions/configure-aws-credentials@v1-node16 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml index b30608008ca5a..d0eeeeed81ff9 100644 --- a/.github/workflows/test-go.yml +++ b/.github/workflows/test-go.yml @@ -22,7 +22,6 @@ on: description: A space-separated list of additional build flags. required: false type: string - default: '' runs-on: description: An expression indicating which kind of runners to use. required: false @@ -32,11 +31,6 @@ on: description: A comma-separated list of additional build tags to consider satisfied during the build. required: false type: string - name: - description: A suffix to append to archived test results - required: false - default: '' - type: string env: ${{ fromJSON(inputs.env-vars) }} @@ -45,7 +39,7 @@ jobs: runs-on: ${{ fromJSON(inputs.runs-on) }} name: Verify Test Package Distribution steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - id: test working-directory: .github/scripts run: | @@ -85,8 +79,8 @@ jobs: GOPRIVATE: github.com/hashicorp/* TIMEOUT_IN_MINUTES: 60 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + - uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 with: go-version-file: ./.go-version cache: true @@ -122,21 +116,9 @@ jobs: if: github.repository != 'hashicorp/vault-enterprise' run: | git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com - - id: go-mod-download - if: matrix.runner-index > 16 - env: - 
GOPRIVATE: github.com/hashicorp/* - run: time go mod download -x - - id: build - if: matrix.runner-index > 16 - env: - GOPRIVATE: github.com/hashicorp/* - run: time make ci-bootstrap dev - id: run-go-tests name: Run Go tests timeout-minutes: ${{ fromJSON(env.TIMEOUT_IN_MINUTES) }} - env: - COMMIT_SHA: ${{ github.sha }} run: | set -exo pipefail @@ -176,11 +158,6 @@ jobs: #export HCP_SCADA_ADDRESS=${{ secrets.HCP_SCADA_ADDRESS }} fi - if [ -f bin/vault ]; then - VAULT_BINARY="$(pwd)/bin/vault" - export VAULT_BINARY - fi - # shellcheck disable=SC2086 # can't quote package list GOARCH=${{ inputs.go-arch }} \ go run gotest.tools/gotestsum --format=short-verbose \ @@ -210,13 +187,13 @@ jobs: datadog-ci junit upload --service "$GITHUB_REPOSITORY" test-results/go-test/results.xml if: always() - name: Archive test results - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce with: - name: test-results${{ inputs.name }}-${{ matrix.runner-index }} + name: test-results-${{ matrix.runner-index }} path: test-results/ if: always() - name: Create a summary of tests - uses: test-summary/action@62bc5c68de2a6a0d02039763b8c754569df99e3f # TSCCR: no entry for repository "test-summary/action" + uses: test-summary/action@62bc5c68de2a6a0d02039763b8c754569df99e3f with: paths: "test-results/go-test/results.xml" show: "fail" diff --git a/.github/workflows/test-run-acc-tests-for-path.yml b/.github/workflows/test-run-acc-tests-for-path.yml deleted file mode 100644 index 10d539e7d85c2..0000000000000 --- a/.github/workflows/test-run-acc-tests-for-path.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: test-run-go-tests-for-path - -on: - workflow_call: - inputs: - name: - description: 'The name to use that will appear in the output log file artifact' - required: true - type: string - path: - description: 'The path to the test without the precedeing "./" or following "/..." e.g. go test -v ./$path/...' 
- required: true - type: string - # We will need to add the capacity for receiving passed secrets once we get to the tests that require API credentials - -env: - VAULT_ACC: 1 - -jobs: - go-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - name: Set Up Go - uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0 - with: - go-version-file: ./.go-version - - run: go test -v ./${{ inputs.path }}/... 2>&1 | tee ${{ inputs.name }}.txt - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 - with: - name: ${{ inputs.name }}-output - path: ${{ inputs.name }}.txt - retention-days: 2 diff --git a/.github/workflows/test-run-enos-scenario-matrix.yml b/.github/workflows/test-run-enos-scenario-matrix.yml index 7a1abd7bc3038..b40b6b50b3f25 100644 --- a/.github/workflows/test-run-enos-scenario-matrix.yml +++ b/.github/workflows/test-run-enos-scenario-matrix.yml @@ -72,7 +72,7 @@ jobs: MATRIX_FILE: ./.github/enos-run-matrices/${{ inputs.matrix-file-name }}.json MATRIX_TEST_GROUP: ${{ inputs.matrix-test-group }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v3 with: ref: ${{ inputs.vault-revision }} - id: metadata @@ -106,13 +106,13 @@ jobs: ENOS_VAR_vault_license_path: ./support/vault.hclic ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v3 - uses: hashicorp/setup-terraform@v2 with: # the Terraform wrapper will break Terraform execution in Enos because # it changes the output to text when we expect it to be JSON. 
terraform_wrapper: false - - uses: aws-actions/configure-aws-credentials@e1e17a757e536f70e52b5a12b2e8d1d1c60e04ef # v2.0.0 + - uses: aws-actions/configure-aws-credentials@v1-node16 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} @@ -131,7 +131,7 @@ jobs: chmod 600 "./enos/support/private_key.pem" echo "debug_data_artifact_name=enos-debug-data_$(echo "${{ matrix.scenario }}" | sed -e 's/ /_/g' | sed -e 's/:/=/g')" >> "$GITHUB_OUTPUT" - if: contains(inputs.matrix-file-name, 'github') - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: ${{ inputs.build-artifact-name }} path: ./enos/support/downloads @@ -150,7 +150,7 @@ jobs: run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} - name: Upload Debug Data if: failure() - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: # The name of the artifact is the same as the matrix scenario name with the spaces replaced with underscores and colons replaced by equals. 
name: ${{ steps.prepare_scenario.outputs.debug_data_artifact_name }} diff --git a/.gitignore b/.gitignore index 81e0bbeb74468..628bfa88a0a6e 100644 --- a/.gitignore +++ b/.gitignore @@ -51,7 +51,6 @@ Vagrantfile # Configs *.hcl -!.copywrite.hcl !.release/ci.hcl !.release/security-scan.hcl !.release/linux/package/etc/vault.d/vault.hcl @@ -59,11 +58,10 @@ Vagrantfile !command/server/test-fixtures/**/*.hcl !enos/**/*.hcl -# Enos +# Enos local Terraform files enos/.enos enos/enos-local.vars.hcl enos/support -# Enos local Terraform files enos/.terraform/* enos/.terraform.lock.hcl enos/*.tfstate @@ -133,5 +131,3 @@ website/components/node_modules *.log tools/godoctests/.bin -tools/gonilnilfunctions/.bin - diff --git a/.go-version b/.go-version index 0bd54efd31633..91c48c058d7f2 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.20.4 +1.19.9 diff --git a/.release/ci.hcl b/.release/ci.hcl index 335a21200fbfa..0be4e8ba9b716 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - schema = "1" project "vault" { diff --git a/.release/docker/docker-entrypoint.sh b/.release/docker/docker-entrypoint.sh index 2b9b8f35a1606..3b72da25b7f41 100755 --- a/.release/docker/docker-entrypoint.sh +++ b/.release/docker/docker-entrypoint.sh @@ -1,7 +1,4 @@ #!/usr/bin/dumb-init /bin/sh -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e # Note above that we run dumb-init as PID 1 in order to reap zombie processes diff --git a/.release/docker/ubi-docker-entrypoint.sh b/.release/docker/ubi-docker-entrypoint.sh index 794e69c614867..6f818bcd439fb 100755 --- a/.release/docker/ubi-docker-entrypoint.sh +++ b/.release/docker/ubi-docker-entrypoint.sh @@ -1,7 +1,4 @@ #!/bin/sh -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - set -e # Prevent core dumps diff --git a/.release/linux/package/etc/vault.d/vault.hcl b/.release/linux/package/etc/vault.d/vault.hcl index 4a59d36725073..33c2e5f3225ed 100644 --- a/.release/linux/package/etc/vault.d/vault.hcl +++ b/.release/linux/package/etc/vault.d/vault.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # Full configuration options can be found at https://www.vaultproject.io/docs/configuration ui = true diff --git a/.release/release-metadata.hcl b/.release/release-metadata.hcl index 3a49b69c59b14..19aadfc71ae1a 100644 --- a/.release/release-metadata.hcl +++ b/.release/release-metadata.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - url_docker_registry_dockerhub = "https://hub.docker.com/r/hashicorp/vault" url_docker_registry_ecr = "https://gallery.ecr.aws/hashicorp/vault" url_license = "https://github.com/hashicorp/vault/blob/main/LICENSE" diff --git a/.release/security-scan.hcl b/.release/security-scan.hcl index 62460e431db96..6d394feaacc18 100644 --- a/.release/security-scan.hcl +++ b/.release/security-scan.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - container { dependencies = true alpine_secdb = true diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ad1236952d6c..bbbea3d397c84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,904 +1,47 @@ -## 1.14.0-rc1 -### June 08, 2023 - -CHANGES: - -* auth/alicloud: Updated plugin from v0.14.0 to v0.15.0 [[GH-20758](https://github.com/hashicorp/vault/pull/20758)] -* auth/azure: Updated plugin from v0.13.0 to v0.15.0 [[GH-20816](https://github.com/hashicorp/vault/pull/20816)] -* auth/centrify: Updated plugin from v0.14.0 to v0.15.1 [[GH-20745](https://github.com/hashicorp/vault/pull/20745)] -* auth/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20725](https://github.com/hashicorp/vault/pull/20725)] -* auth/jwt: Updated plugin from v0.15.0 to v0.16.0 [[GH-20799](https://github.com/hashicorp/vault/pull/20799)] -* auth/kubernetes: Update plugin to v0.16.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] -* core: Bump Go version to 1.20.4. -* core: Remove feature toggle for SSCTs, i.e. the env var VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS. [[GH-20834](https://github.com/hashicorp/vault/pull/20834)] -* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] -* database/couchbase: Updated plugin from v0.9.0 to v0.9.2 [[GH-20764](https://github.com/hashicorp/vault/pull/20764)] -* database/redis-elasticache: Updated plugin from v0.2.0 to v0.2.1 [[GH-20751](https://github.com/hashicorp/vault/pull/20751)] -* replication (enterprise): Add a new parameter for the update-primary API call -that allows for setting of the primary cluster addresses directly, instead of -via a token. 
-* secrets/ad: Updated plugin from v0.10.1-0.20230329210417-0b2cdb26cf5d to v0.16.0 [[GH-20750](https://github.com/hashicorp/vault/pull/20750)] -* secrets/alicloud: Updated plugin from v0.5.4-beta1.0.20230330124709-3fcfc5914a22 to v0.15.0 [[GH-20787](https://github.com/hashicorp/vault/pull/20787)] -* secrets/aure: Updated plugin from v0.15.0 to v0.16.0 [[GH-20777](https://github.com/hashicorp/vault/pull/20777)] -* secrets/database/mongodbatlas: Updated plugin from v0.9.0 to v0.10.0 [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] -* secrets/database/snowflake: Updated plugin from v0.7.0 to v0.8.0 [[GH-20807](https://github.com/hashicorp/vault/pull/20807)] -* secrets/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20818](https://github.com/hashicorp/vault/pull/20818)] -* secrets/keymgmt: Updated plugin to v0.9.1 -* secrets/kubernetes: Update plugin to v0.5.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] -* secrets/mongodbatlas: Updated plugin from v0.9.1 to v0.10.0 [[GH-20742](https://github.com/hashicorp/vault/pull/20742)] -* secrets/pki: Warning when issuing leafs from CSRs with basic constraints. In the future, issuance of non-CA leaf certs from CSRs with asserted IsCA Basic Constraints will be prohibited. [[GH-20654](https://github.com/hashicorp/vault/pull/20654)] - -FEATURES: - -* **AWS Static Roles**: The AWS Secrets Engine can manage static roles configured by users. [[GH-20536](https://github.com/hashicorp/vault/pull/20536)] -* **Automated License Utilization Reporting**: Added automated license -utilization reporting, which sends minimal product-license [metering -data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) -to HashiCorp without requiring you to manually collect and report them. 
-* **MongoDB Atlas Database Secrets**: Adds support for generating X.509 certificates on dynamic roles for user authentication [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] -* **NEW PKI Workflow in UI**: Completes generally available rollout of new PKI UI that provides smoother mount configuration and a more guided user experience [[GH-pki-ui-improvements](https://github.com/hashicorp/vault/pull/pki-ui-improvements)] -* **Vault PKI ACME Server**: Support for the ACME certificate lifecycle management protocol has been added to the Vault PKI Plugin. This allows standard ACME clients, such as the EFF's certbot and the CNCF's k8s cert-manager, to request certificates from a Vault server with no knowledge of Vault APIs or authentication mechanisms. For public-facing Vault instances, we recommend requiring External Account Bindings (EAB) to limit the ability to request certificates to only authenticated clients. [[GH-20752](https://github.com/hashicorp/vault/pull/20752)] -* **Vault Proxy**: Introduced Vault Proxy, a new subcommand of the Vault binary that can be invoked using `vault proxy -config=config.hcl`. It currently has the same feature set as Vault Agent's API proxy, but the two may diverge in the future. We plan to deprecate the API proxy functionality of Vault Agent in a future release. [[GH-20548](https://github.com/hashicorp/vault/pull/20548)] -* cli: Add 'agent generate-config' sub-command [[GH-20530](https://github.com/hashicorp/vault/pull/20530)] -* **Sidebar Navigation in UI**: A new sidebar navigation panel has been added in the UI to replace the top navigation bar. - -IMPROVEMENTS: - -* activitylog: EntityRecord protobufs now contain a ClientType field for -distinguishing client sources. 
[[GH-20626](https://github.com/hashicorp/vault/pull/20626)] -* agent: Add integration tests for agent running in process supervisor mode [[GH-20741](https://github.com/hashicorp/vault/pull/20741)] -* agent: Add logic to validate env_template entries in configuration [[GH-20569](https://github.com/hashicorp/vault/pull/20569)] -* agent: initial implementation of a process runner for injecting secrets via environment variables via vault agent [[GH-20628](https://github.com/hashicorp/vault/pull/20628)] -* api: GET ... /sys/internal/counters/activity?current_billing_period=true now -results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] -* audit: forwarded requests can now contain host metadata on the node it was sent 'from' or a flag to indicate that it was forwarded. -* auth/kerberos: Enable plugin multiplexing -auth/kerberos: Upgrade plugin dependencies [[GH-20771](https://github.com/hashicorp/vault/pull/20771)] -* command/server (enterprise): -dev-three-node now creates perf standbys instead of regular standbys. [[GH-20629](https://github.com/hashicorp/vault/pull/20629)] -* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when -`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] -* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core, secrets/pki, audit: Update dependency go-jose to v3 due to v2 deprecation. 
[[GH-20559](https://github.com/hashicorp/vault/pull/20559)] -* core: Add possibility to decode a generated encoded root token via the rest API [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] -* core: include namespace path in granting_policies block of audit log -* core: include reason for ErrReadOnly on PBPWF writing failures -* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] -* database/elasticsearch: Upgrade plugin dependencies [[GH-20767](https://github.com/hashicorp/vault/pull/20767)] -* database/redis: Upgrade plugin dependencies [[GH-20763](https://github.com/hashicorp/vault/pull/20763)] -* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] -* secrets/consul: Improve error message when ACL bootstrapping fails. [[GH-20891](https://github.com/hashicorp/vault/pull/20891)] -* secrets/gcpkms: Enable plugin multiplexing -secrets/gcpkms: Upgrade plugin dependencies [[GH-20784](https://github.com/hashicorp/vault/pull/20784)] -* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] -* secrets/transit: Respond to writes with updated key policy, cache configuration. [[GH-20652](https://github.com/hashicorp/vault/pull/20652)] -* secrets/transit: Support BYOK-encrypted export of keys to securely allow synchronizing specific keys and version across clusters. [[GH-20736](https://github.com/hashicorp/vault/pull/20736)] -* ui: Add filtering by auth type and auth name to the Authentication Method list view. 
[[GH-20747](https://github.com/hashicorp/vault/pull/20747)] -* ui: Update Web CLI with examples and a new `kv-get` command for reading kv v2 data and metadata [[GH-20590](https://github.com/hashicorp/vault/pull/20590)] - -BUG FIXES: - -* agent: Fix bug with 'cache' stanza validation [[GH-20934](https://github.com/hashicorp/vault/pull/20934)] -* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] -* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] -* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. -* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. -* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time. -* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur -* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace -* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` -resulting in 412 errors. -* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts.
-* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] -* license (enterprise): Fix bug where license would update even if the license didn't change. -* replication (enterprise): Fix a caching issue when replicating filtered data to -a performance secondary. This resulted in the data being set to nil in the cache -and a "invalid value" error being returned from the API. -* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil -* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. -* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] -* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens -* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation -* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions -* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] -* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. 
For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. -secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. -sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] -* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] -* ui: fixes auto_rotate_period ttl input for transit keys [[GH-20731](https://github.com/hashicorp/vault/pull/20731)] -* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] - -## 1.13.3 -### June 08, 2023 - -CHANGES: - -* core: Bump Go version to 1.20.4. -* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] -* replication (enterprise): Add a new parameter for the update-primary API call -that allows for setting of the primary cluster addresses directly, instead of -via a token. -* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] - -IMPROVEMENTS: - -* Add debug symbols back to builds to fix Dynatrace support [[GH-20519](https://github.com/hashicorp/vault/pull/20519)] -* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)] -* autopilot: Update version to v0.2.0 to add better support for respecting min quorum [[GH-19472](https://github.com/hashicorp/vault/pull/19472)] -* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when -`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. 
[[GH-20609](https://github.com/hashicorp/vault/pull/20609)] -* core: Add possibility to decode a generated encoded root token via the rest API [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] -* core: include namespace path in granting_policies block of audit log -* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] -* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] -* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] -* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] - -BUG FIXES: - -* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] -* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] -* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] -* cli: disable printing flags warnings messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)] -* command/server: fixes panic in Vault server command when running in recovery mode [[GH-20418](https://github.com/hashicorp/vault/pull/20418)] -* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. -* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. -* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace -* core/identity: Allow updates of only the custom-metadata for entity alias.
[[GH-20368](https://github.com/hashicorp/vault/pull/20368)] -* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts. -* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] -* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] -* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. -* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. -* secrets/pki: Include per-issuer enable_aia_url_templating in issuer read endpoint. [[GH-20354](https://github.com/hashicorp/vault/pull/20354)] -* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation -* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] -* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. -secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. -sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. 
[[GH-20864](https://github.com/hashicorp/vault/pull/20864)] -* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] -* ui: fixes issue creating mfa login enforcement from method enforcements tab [[GH-20603](https://github.com/hashicorp/vault/pull/20603)] -* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] - -## 1.13.2 -### April 26, 2023 - -CHANGES: - -* core: Bump Go version to 1.20.3. - -SECURITY: - -* core/seal: Fix handling of HMACing of seal-wrapped storage entries from HSMs using CKM_AES_CBC or CKM_AES_CBC_PAD which may have allowed an attacker to conduct a padding oracle attack. This vulnerability, CVE-2023-2197, affects Vault from 1.13.0 up to 1.13.1 and was fixed in 1.13.2. [[HCSEC-2023-14](https://discuss.hashicorp.com/t/hcsec-2023-14-vault-enterprise-vulnerable-to-padding-oracle-attacks-when-using-a-cbc-based-encryption-mechanism-with-a-hsm/53322)] - -IMPROVEMENTS: - -* Add debug symbols back to builds to fix Dynatrace support [[GH-20294](https://github.com/hashicorp/vault/pull/20294)] -* cli/namespace: Add detailed flag to output additional namespace information -such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] -* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] -* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the -`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] -* core: include reason for ErrReadOnly on PBPWF writing failures -* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration -for all LDAP plugins. 
[[GH-20144](https://github.com/hashicorp/vault/pull/20144)] -* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)] -* sys/wrapping: Add example how to unwrap without authentication in Vault [[GH-20109](https://github.com/hashicorp/vault/pull/20109)] -* ui: Allows license-banners to be dismissed. Saves preferences in localStorage. [[GH-19116](https://github.com/hashicorp/vault/pull/19116)] - -BUG FIXES: - -* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] -* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] -* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. -* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur -* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` -resulting in 412 errors. -* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] -* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] -* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute. 
-* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)] -* replication (enterprise): Fix a caching issue when replicating filtered data to -a performance secondary. This resulted in the data being set to nil in the cache -and an "invalid value" error being returned from the API. -* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil -* sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. -auth/cert: Fix OCSP validation against Vault's PKI engine. [[GH-20181](https://github.com/hashicorp/vault/pull/20181)] -* secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. [[GH-20034](https://github.com/hashicorp/vault/pull/20034)] -* secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unattended forwarding. [[GH-20057](https://github.com/hashicorp/vault/pull/20057)] -* secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures. [[GH-20058](https://github.com/hashicorp/vault/pull/20058)] -* secrets/pki: Fix patching of leaf_not_after_behavior on issuers.
[[GH-20341](https://github.com/hashicorp/vault/pull/20341)] -* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens -* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] -* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] -* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] -* ui: fixes remaining doc links to include /vault in path [[GH-20070](https://github.com/hashicorp/vault/pull/20070)] -* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] -* website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. [[GH-20216](https://github.com/hashicorp/vault/pull/20216)] - -## 1.13.1 -### March 29, 2023 - -SECURITY: - -* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] -* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. 
[[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] -* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] - -IMPROVEMENTS: - -* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id -website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] -* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch -option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] -* core: validate name identifiers in mssql physical storage backend prior use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] -* database/elasticsearch: Update error messages resulting from Elasticsearch API errors [[GH-19545](https://github.com/hashicorp/vault/pull/19545)] -* events: Suppress log warnings triggered when events are sent but the events system is not enabled. [[GH-19593](https://github.com/hashicorp/vault/pull/19593)] - -BUG FIXES: - -* agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener. 
[[GH-19483](https://github.com/hashicorp/vault/pull/19483)] -* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. -* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)] -* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects. -* kmip (enterprise): Fix a problem forwarding some requests to the active node. -* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] -* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19640](https://github.com/hashicorp/vault/pull/19640)] -* secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug [[GH-19624](https://github.com/hashicorp/vault/pull/19624)] -* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions -* ui: Fixes crypto.randomUUID error in unsecure contexts from third party ember-data library [[GH-19428](https://github.com/hashicorp/vault/pull/19428)] -* ui: fixes SSH engine config deletion [[GH-19448](https://github.com/hashicorp/vault/pull/19448)] -* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] -* ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been inputted [[GH-19541](https://github.com/hashicorp/vault/pull/19541)] -* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] -* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. 
ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] - -## 1.13.0 -### March 01, 2023 - -SECURITY: - -* secrets/ssh: removal of the deprecated dynamic keys mode. **When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. [[GH-18874](https://github.com/hashicorp/vault/pull/18874)] -* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] - -CHANGES: - -* auth/alicloud: require the `role` field on login [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] -* auth/approle: Add maximum length of 4096 for approle role_names, as this value results in HMAC calculation [[GH-17768](https://github.com/hashicorp/vault/pull/17768)] -* auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existent users. -This will only be used internally for implementing user lockout. [[GH-17104](https://github.com/hashicorp/vault/pull/17104)] -* core: Bump Go version to 1.20.1. -* core: Vault version has been moved out of sdk and into main vault module. -Plugins using sdk/useragent.String must instead use sdk/useragent.PluginString. 
[[GH-14229](https://github.com/hashicorp/vault/pull/14229)] -* logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), should use 'VAULT_LOG_FORMAT' instead [[GH-17822](https://github.com/hashicorp/vault/pull/17822)] -* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. [[GH-16982](https://github.com/hashicorp/vault/pull/16982)] -* plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] -* plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] -* sdk: Remove version package, make useragent.String versionless. [[GH-19068](https://github.com/hashicorp/vault/pull/19068)] -* secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls [[GH-15869](https://github.com/hashicorp/vault/pull/15869)] -* secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 [[GH-19063](https://github.com/hashicorp/vault/pull/19063)] -* sys/internal/inspect: Turns off this endpoint by default. A SIGHUP can now be used to reload the configs and turn this endpoint on.
-* ui: Upgrade Ember to version 4.4.0 [[GH-17086](https://github.com/hashicorp/vault/pull/17086)] - -FEATURES: - -* **Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] -* **Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] -* **Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. [[GH-19194](https://github.com/hashicorp/vault/pull/19194)] -* **GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] -* **Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. [[GH-17893](https://github.com/hashicorp/vault/pull/17893)] -* **New PKI UI**: Add beta support for new and improved PKI UI [[GH-18842](https://github.com/hashicorp/vault/pull/18842)] -* **PKI Cross-Cluster Revocations**: Revocation information can now be -synchronized across primary and performance replica clusters offering -a unified CRL/OCSP view of revocations across cluster boundaries. 
[[GH-19196](https://github.com/hashicorp/vault/pull/19196)] -* **Server UDS Listener**: Adding listener to Vault server to serve http request via unix domain socket [[GH-18227](https://github.com/hashicorp/vault/pull/18227)] -* **Transit managed keys**: The transit secrets engine now supports configuring and using managed keys -* **User Lockout**: Adds support to configure the user-lockout behaviour for failed logins to prevent -brute force attacks for userpass, approle and ldap auth methods. [[GH-19230](https://github.com/hashicorp/vault/pull/19230)] -* **VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] -* **Namespaces (enterprise)**: Added the ability to allow access to secrets and more to be shared across namespaces that do not share a namespace hierarchy. Using the new `sys/config/group-policy-application` API, policies can be configured to apply outside of namespace hierarchy, allowing this kind of cross-namespace sharing. -* **OpenAPI-based Go & .NET Client Libraries (Beta)**: We have now made available two new [[OpenAPI-based Go](https://github.com/hashicorp/vault-client-go/)] & [[OpenAPI-based .NET](https://github.com/hashicorp/vault-client-dotnet/)] Client libraries (beta). You can use them to perform various secret management operations easily from your applications. - -IMPROVEMENTS: - -* **Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility. [[GH-18752](https://github.com/hashicorp/vault/pull/18752)] -* Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 [[GH-19100](https://github.com/hashicorp/vault/pull/19100)] -* Reduced binary size [[GH-17678](https://github.com/hashicorp/vault/pull/17678)] -* agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. 
[[GH-18403](https://github.com/hashicorp/vault/pull/18403)] -* agent: Add note in logs when starting Vault Agent indicating if the version differs to the Vault Server. [[GH-18684](https://github.com/hashicorp/vault/pull/18684)] -* agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent. [[GH-18740](https://github.com/hashicorp/vault/pull/18740)] -* agent: Agent listeners can now be configured to be the `metrics_only` role, serving only metrics, as part of the listener's new top level `role` option. [[GH-18101](https://github.com/hashicorp/vault/pull/18101)] -* agent: Configured Vault Agent listeners now listen without the need for caching to be configured. [[GH-18137](https://github.com/hashicorp/vault/pull/18137)] -* agent: allows some parts of config to be reloaded without requiring a restart. [[GH-18638](https://github.com/hashicorp/vault/pull/18638)] -* agent: fix incorrectly used loop variables in parallel tests and when finalizing seals [[GH-16872](https://github.com/hashicorp/vault/pull/16872)] -* api: Remove dependency on sdk module. [[GH-18962](https://github.com/hashicorp/vault/pull/18962)] -* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] -* audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries [[GH-18128](https://github.com/hashicorp/vault/pull/18128)] -* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] -* auth/alicloud: upgrades dependencies [[GH-18021](https://github.com/hashicorp/vault/pull/18021)] -* auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a -Virtual Machine Scale Set (VMSS) in flexible orchestration mode.
[[GH-17540](https://github.com/hashicorp/vault/pull/17540)] -* auth/azure: upgrades dependencies [[GH-17857](https://github.com/hashicorp/vault/pull/17857)] -* auth/cert: Add configurable support for validating client certs with OCSP. [[GH-17093](https://github.com/hashicorp/vault/pull/17093)] -* auth/cert: Support listing provisioned CRLs within the mount. [[GH-18043](https://github.com/hashicorp/vault/pull/18043)] -* auth/cf: Remove incorrect usage of CreateOperation from path_config [[GH-19098](https://github.com/hashicorp/vault/pull/19098)] -* auth/gcp: Upgrades dependencies [[GH-17858](https://github.com/hashicorp/vault/pull/17858)] -* auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] -* auth/oidc: Adds ability to set Google Workspace domain for groups search [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] -* auth/token (enterprise): Allow batch token creation in perfStandby nodes -* auth: Allow naming login MFA methods and using those names instead of IDs in satisfying MFA requirement for requests. -Make passcode arguments consistent across login MFA method types. [[GH-18610](https://github.com/hashicorp/vault/pull/18610)] -* auth: Provide an IP address of the requests from Vault to a Duo challenge after successful authentication. [[GH-18811](https://github.com/hashicorp/vault/pull/18811)] -* autopilot: Update version to v.0.2.0 to add better support for respecting min quorum -* cli/kv: improve kv CLI to remove data or custom metadata using kv patch [[GH-18067](https://github.com/hashicorp/vault/pull/18067)] -* cli/pki: Add List-Intermediates functionality to pki client. [[GH-18463](https://github.com/hashicorp/vault/pull/18463)] -* cli/pki: Add health-check subcommand to evaluate the health of a PKI instance. 
[[GH-17750](https://github.com/hashicorp/vault/pull/17750)] -* cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. [[GH-18467](https://github.com/hashicorp/vault/pull/18467)] -* cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. [[GH-18499](https://github.com/hashicorp/vault/pull/18499)] -* cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file [[GH-19269](https://github.com/hashicorp/vault/pull/19269)] -* cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. [[GH-17650](https://github.com/hashicorp/vault/pull/17650)] -* cli: Add transit import key helper commands for BYOK to Transit/Transform. [[GH-18887](https://github.com/hashicorp/vault/pull/18887)] -* cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies. [[GH-14945](https://github.com/hashicorp/vault/pull/14945)] -* cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` [[GH-18892](https://github.com/hashicorp/vault/pull/18892)] -* client/pki: Add a new command verify-sign which checks the relationship between two certificates. [[GH-18437](https://github.com/hashicorp/vault/pull/18437)] -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. -* core/identity: Add machine-readable output to body of response upon alias clash during entity merge [[GH-17459](https://github.com/hashicorp/vault/pull/17459)] -* core/server: Added an environment variable to write goroutine stacktraces to a -temporary file for SIGUSR2 signals. 
[[GH-17929](https://github.com/hashicorp/vault/pull/17929)] -* core: Add RPCs to read and update userFailedLoginInfo map -* core: Add experiments system and `events.alpha1` experiment. [[GH-18682](https://github.com/hashicorp/vault/pull/18682)] -* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] -* core: Add user lockout field to config and configuring this for auth mount using auth tune to prevent brute forcing in auth methods [[GH-17338](https://github.com/hashicorp/vault/pull/17338)] -* core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. [[GH-18718](https://github.com/hashicorp/vault/pull/18718)] -* core: Added sys/locked-users endpoint to list locked users. Changed api endpoint from -sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. [[GH-18675](https://github.com/hashicorp/vault/pull/18675)] -* core: Added sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] endpoint to unlock an user -with given mount_accessor and alias_identifier if locked [[GH-18279](https://github.com/hashicorp/vault/pull/18279)] -* core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used. [[GH-17855](https://github.com/hashicorp/vault/pull/17855)] -* core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods. [[GH-18673](https://github.com/hashicorp/vault/pull/18673)] -* core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. 
[[GH-17265](https://github.com/hashicorp/vault/pull/17265)] -* core: Update protoc from 3.21.5 to 3.21.7 [[GH-17499](https://github.com/hashicorp/vault/pull/17499)] -* core: add `detect_deadlocks` config to optionally detect core state deadlocks [[GH-18604](https://github.com/hashicorp/vault/pull/18604)] -* core: added changes for user lockout workflow. [[GH-17951](https://github.com/hashicorp/vault/pull/17951)] -* core: parallelize backend initialization to improve startup time for large numbers of mounts. [[GH-18244](https://github.com/hashicorp/vault/pull/18244)] -* database/postgres: Support multiline strings for revocation statements. [[GH-18632](https://github.com/hashicorp/vault/pull/18632)] -* database/redis-elasticache: changed config argument names for disambiguation [[GH-19044](https://github.com/hashicorp/vault/pull/19044)] -* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] -* hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) [[GH-18228](https://github.com/hashicorp/vault/pull/18228)] -* hcp/connectivity: Include HCP organization, project, and resource ID in server startup logs [[GH-18315](https://github.com/hashicorp/vault/pull/18315)] -* hcp/connectivity: Only update SCADA session metadata if status changes [[GH-18585](https://github.com/hashicorp/vault/pull/18585)] -* hcp/status: Add cluster-level status information [[GH-18351](https://github.com/hashicorp/vault/pull/18351)] -* hcp/status: Expand node-level status information [[GH-18302](https://github.com/hashicorp/vault/pull/18302)] -* logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config [[GH-17841](https://github.com/hashicorp/vault/pull/17841)] -* logging: Vault agent and server commands support log file and log rotation. 
[[GH-18031](https://github.com/hashicorp/vault/pull/18031)] -* migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. [[GH-18817](https://github.com/hashicorp/vault/pull/18817)] -* namespaces (enterprise): Add new API, `sys/config/group-policy-application`, to allow group policies to be configurable -to apply to a group in `any` namespace. The default, `within_namespace_hierarchy`, is the current behaviour. -* openapi: Add default values to thing_mount_path parameters [[GH-18935](https://github.com/hashicorp/vault/pull/18935)] -* openapi: Add logic to generate openapi response structures [[GH-18192](https://github.com/hashicorp/vault/pull/18192)] -* openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go [[GH-18772](https://github.com/hashicorp/vault/pull/18772)] -* openapi: Add openapi response definitions to approle/path_role.go [[GH-18198](https://github.com/hashicorp/vault/pull/18198)] -* openapi: Change gen_openapi.sh to generate schema with generic mount paths [[GH-18934](https://github.com/hashicorp/vault/pull/18934)] -* openapi: Mark request body objects as required [[GH-17909](https://github.com/hashicorp/vault/pull/17909)] -* openapi: add openapi response defintions to /sys/audit endpoints [[GH-18456](https://github.com/hashicorp/vault/pull/18456)] -* openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` [[GH-18663](https://github.com/hashicorp/vault/pull/18663)] -* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] -* plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. 
[[GH-17289](https://github.com/hashicorp/vault/pull/17289)] -* plugins: Let Vault unseal and mount deprecated builtin plugins in a -deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* plugins: Mark app-id auth method Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] -* plugins: Mark logical database plugins Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] -* sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) [[GH-18635](https://github.com/hashicorp/vault/pull/18635)] -* sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests [[GH-18636](https://github.com/hashicorp/vault/pull/18636)] -* secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] [[GH-17747](https://github.com/hashicorp/vault/pull/17747)] -* secrets/azure: Adds ability to persist an application for the lifetime of a role. 
[[GH-19096](https://github.com/hashicorp/vault/pull/19096)] -* secrets/azure: upgrades dependencies [[GH-17964](https://github.com/hashicorp/vault/pull/17964)] -* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* secrets/gcp: Upgrades dependencies [[GH-17871](https://github.com/hashicorp/vault/pull/17871)] -* secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] [[GH-18587](https://github.com/hashicorp/vault/pull/18587)] -* secrets/kubernetes: add /check endpoint to determine if environment variables are set [[GH-19084](https://github.com/hashicorp/vault/pull/19084)] -* secrets/kv: Emit events on write if events system enabled [[GH-19145](https://github.com/hashicorp/vault/pull/19145)] -* secrets/kv: make upgrade synchronous when no keys to upgrade [[GH-19056](https://github.com/hashicorp/vault/pull/19056)] -* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] -* secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster [[GH-17779](https://github.com/hashicorp/vault/pull/17779)] -* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] -* secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters [[GH-18040](https://github.com/hashicorp/vault/pull/18040)] -* secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when -allowed by role [[GH-18397](https://github.com/hashicorp/vault/pull/18397)] -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. 
[[GH-17824](https://github.com/hashicorp/vault/pull/17824)] -* secrets/pki: Allow templating performance replication cluster- and issuer-specific AIA URLs. [[GH-18199](https://github.com/hashicorp/vault/pull/18199)] -* secrets/pki: Allow tidying of expired issuer certificates. [[GH-17823](https://github.com/hashicorp/vault/pull/17823)] -* secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. [[GH-18645](https://github.com/hashicorp/vault/pull/18645)] -* secrets/pki: Respond with written data to `config/auto-tidy`, `config/crl`, and `roles/:role`. [[GH-18222](https://github.com/hashicorp/vault/pull/18222)] -* secrets/pki: Return issuer_id and issuer_name on /issuer/:issuer_ref/json endpoint. [[GH-18482](https://github.com/hashicorp/vault/pull/18482)] -* secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id to existing certificate serial lookup api if it is revoked [[GH-17774](https://github.com/hashicorp/vault/pull/17774)] -* secrets/ssh: Allow removing SSH host keys from the dynamic keys feature. [[GH-18939](https://github.com/hashicorp/vault/pull/18939)] -* secrets/ssh: Evaluate ssh validprincipals user template before splitting [[GH-16622](https://github.com/hashicorp/vault/pull/16622)] -* secrets/transit: Add an optional reference field to batch operation items -which is repeated on batch responses to help more easily correlate inputs with outputs. [[GH-18243](https://github.com/hashicorp/vault/pull/18243)] -* secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers [[GH-17638](https://github.com/hashicorp/vault/pull/17638)] -* secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures [[GH-17636](https://github.com/hashicorp/vault/pull/17636)] -* secrets/transit: Allow configuring whether upsert of keys is allowed. [[GH-18272](https://github.com/hashicorp/vault/pull/18272)] -* storage/raft: Add `retry_join_as_non_voter` config option. 
[[GH-18030](https://github.com/hashicorp/vault/pull/18030)] -* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] -* sys/internal/inspect: Creates an endpoint to look to inspect internal subsystems. [[GH-17789](https://github.com/hashicorp/vault/pull/17789)] -* sys/internal/inspect: Creates an endpoint to look to inspect internal subsystems. -* ui: Add algorithm-signer as a SSH Secrets Engine UI field [[GH-10299](https://github.com/hashicorp/vault/pull/10299)] -* ui: Add inline policy creation when creating an identity entity or group [[GH-17749](https://github.com/hashicorp/vault/pull/17749)] -* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. [[GH-18787](https://github.com/hashicorp/vault/pull/18787)] -* ui: Enable typescript for future development [[GH-17927](https://github.com/hashicorp/vault/pull/17927)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] -* ui: adds allowed_response_headers as param for secret engine mount config [[GH-19216](https://github.com/hashicorp/vault/pull/19216)] -* ui: consolidate all tag usage [[GH-17866](https://github.com/hashicorp/vault/pull/17866)] -* ui: mfa: use proper request id generation [[GH-17835](https://github.com/hashicorp/vault/pull/17835)] -* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] -* ui: update DocLink component to use new host url: developer.hashicorp.com [[GH-18374](https://github.com/hashicorp/vault/pull/18374)] -* ui: update TTL picker for consistency 
[[GH-18114](https://github.com/hashicorp/vault/pull/18114)] -* ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint [[GH-17575](https://github.com/hashicorp/vault/pull/17575)] -* vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 [[GH-18589](https://github.com/hashicorp/vault/pull/18589)] - -DEPRECATIONS: - -* secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated. [[GH-19334](https://github.com/hashicorp/vault/pull/19334)] - -BUG FIXES: - -* api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext [[GH-18708](https://github.com/hashicorp/vault/pull/18708)] -* auth/alicloud: fix regression in vault login command that caused login to fail [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] -* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] -* auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) [[GH-19094](https://github.com/hashicorp/vault/pull/19094)] -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli/kv: 
skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* cli/pki: Decode integer values properly in health-check configuration file [[GH-19265](https://github.com/hashicorp/vault/pull/19265)] -* cli/pki: Fix path for role health-check warning messages [[GH-19274](https://github.com/hashicorp/vault/pull/19274)] -* cli/pki: Properly report permission issues within health-check mount tune checks [[GH-19276](https://github.com/hashicorp/vault/pull/19276)] -* cli/transit: Fix import, import-version command invocation [[GH-19373](https://github.com/hashicorp/vault/pull/19373)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] -* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)] -* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] -* core (enterprise): Fix missing quotation mark in error message -* core (enterprise): Fix panic that could occur with SSCT alongside invoking external plugins for revocation. -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time. -* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. 
-* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)] -* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] -* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] -* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] -* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] -* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] -* core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided [[GH-18859](https://github.com/hashicorp/vault/pull/18859)] -* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. 
-* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] -* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths [[GH-18568](https://github.com/hashicorp/vault/pull/18568)] -* core: Fix vault operator init command to show the right curl string with -output-curl-string and right policy hcl with -output-policy [[GH-17514](https://github.com/hashicorp/vault/pull/17514)] -* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] -* core: Linux packages now have vendor label and set the default label to HashiCorp. -This fix is implemented for any future releases, but will not be updated for historical releases. -* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* core: Refactor lock grabbing code to simplify stateLock deadlock investigations [[GH-17187](https://github.com/hashicorp/vault/pull/17187)] -* core: fix GPG encryption to support subkeys. [[GH-16224](https://github.com/hashicorp/vault/pull/16224)] -* core: fix a start up race condition where performance standbys could go into a -mount loop if default policies are not yet synced from the active node. 
[[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: fix race when using SystemView.ReplicationState outside of a request context [[GH-17186](https://github.com/hashicorp/vault/pull/17186)] -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* core: trying to unseal with the wrong key now returns HTTP 400 [[GH-17836](https://github.com/hashicorp/vault/pull/17836)] -* credential/cert: adds error message if no tls connection is found during the AliasLookahead operation [[GH-17904](https://github.com/hashicorp/vault/pull/17904)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] -* kmip (enterprise): Fix a problem with some multi-part MAC Verify operations. -* kmip (enterprise): Only require data to be full blocks on encrypt/decrypt operations using CBC and ECB block cipher modes. -* license (enterprise): Fix bug where license would update even if the license didn't change. 
-* licensing (enterprise): update autoloaded license cache after reload -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] -* openapi: fix gen_openapi.sh script to correctly load vault plugins [[GH-17752](https://github.com/hashicorp/vault/pull/17752)] -* plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. [[GH-17339](https://github.com/hashicorp/vault/pull/17339)] -* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] -* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] -* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] -* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] -* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* sdk: Don't panic if system view or storage methods called during plugin setup. 
[[GH-18210](https://github.com/hashicorp/vault/pull/18210)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/ad: Fix bug where updates to config would fail if password isn't provided [[GH-19061](https://github.com/hashicorp/vault/pull/19061)] -* secrets/gcp: fix issue where IAM bindings were not preserved during policy update [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] -* secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys [[GH-19111](https://github.com/hashicorp/vault/pull/19111)] -* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] -* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] -* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] -* secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. [[GH-16700](https://github.com/hashicorp/vault/pull/16700)] -* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. 
[[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] -* secrets/pki: consistently use UTC for CA's notAfter exceeded error message [[GH-18984](https://github.com/hashicorp/vault/pull/18984)] -* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)] -* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] -* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] -* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] -* storage/raft (enterprise): An already joined node can rejoin by wiping storage -and re-issueing a join request, but in doing so could transiently become a -non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] -* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] -* storage/raft: Fix race with follower heartbeat tracker during teardown. [[GH-18704](https://github.com/hashicorp/vault/pull/18704)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. 
[[GH-17407](https://github.com/hashicorp/vault/pull/17407)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] -* ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. [[GH-19290](https://github.com/hashicorp/vault/pull/19290)] -* ui: Remove default value of 30 to TtlPicker2 if no value is passed in. [[GH-17376](https://github.com/hashicorp/vault/pull/17376)] -* ui: allow selection of "default" for ssh algorithm_signer in web interface [[GH-17894](https://github.com/hashicorp/vault/pull/17894)] -* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] -* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19403](https://github.com/hashicorp/vault/pull/19403)] -* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] - -## 1.12.7 -### June 08, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.9. -* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. 
[[GH-20826](https://github.com/hashicorp/vault/pull/20826)] - -IMPROVEMENTS: - -* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)] -* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when -`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] -* core: include namespace path in granting_policies block of audit log -* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] -* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] -* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] -* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] - -BUG FIXES: - -* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] -* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] -* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] -* cli: disable printing flags warnings messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)] -* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. -* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. 
-* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace -* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] -* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. -* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. -* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation -* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. -secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. -sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] -* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] - -## 1.12.6 -### April 26, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.8. - -IMPROVEMENTS: - -* cli/namespace: Add detailed flag to output additional namespace information -such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] -* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] -* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the -`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. 
[[GH-20044](https://github.com/hashicorp/vault/pull/20044)] -* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration -for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] -* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)] - -BUG FIXES: - -* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] -* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] -* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. -* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur -* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` -resulting in 412 errors. -* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] -* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] -* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute. 
-* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] -* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)] -* replication (enterprise): Fix a caching issue when replicating filtered data to -a performance secondary. This resulted in the data being set to nil in the cache -and an "invalid value" error being returned from the API. -* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil -* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. [[GH-20341](https://github.com/hashicorp/vault/pull/20341)] -* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens -* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] -* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] -* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] -* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] - -## 1.12.5 -### March 29, 2023 - -SECURITY: - -* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. 
[[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] -* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] -* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] - -IMPROVEMENTS: - -* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id -website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] -* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch -option in case all else fails for some replication issues we may not have fully reproduced. 
[[GH-19676](https://github.com/hashicorp/vault/pull/19676)] -* core: validate name identifiers in mssql physical storage backend prior use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] - -BUG FIXES: - -* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] -* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. -* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)] -* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects. -* kmip (enterprise): Fix a problem forwarding some requests to the active node. -* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] -* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19641](https://github.com/hashicorp/vault/pull/19641)] -* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions -* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] -* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] -* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. 
ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] - -## 1.12.4 -### March 01, 2023 - -SECURITY: -* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HCSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] - -CHANGES: - -* core: Bump Go version to 1.19.6. - -IMPROVEMENTS: - -* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] -* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] - -BUG FIXES: - -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time. -* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* license (enterprise): Fix bug where license would update even if the license didn't change. -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. 
[[GH-18207](https://github.com/hashicorp/vault/pull/18207)] -* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] -* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] -* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] -* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19410](https://github.com/hashicorp/vault/pull/19410)] -* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] - -## 1.12.3 -### February 6, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.4. - -IMPROVEMENTS: - -* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. -* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] -* plugins: Let Vault unseal and mount deprecated builtin plugins in a -deactivated state if this is not the first unseal after an upgrade. 
[[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] -* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] -* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. [[GH-18787](https://github.com/hashicorp/vault/pull/18787)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] - -BUG FIXES: - -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] -* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] -* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] -* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. 
-* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. -* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] -* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. -* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. -* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. -* licensing (enterprise): update autoloaded license cache after reload -* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] -* plugins: Skip loading but still mount data associated with missing plugins on unseal. 
[[GH-18189](https://github.com/hashicorp/vault/pull/18189)] -* sdk: Don't panic if system view or storage methods called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)] -* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] -* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] -* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] -* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)] -* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] -* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] -* storage/raft (enterprise): An already joined node can rejoin by wiping storage -and re-issuing a join request, but in doing so could transiently become a -non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] -* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] -* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] -## 1.12.2 -### November 30, 2022 - -CHANGES: - -* core: Bump Go version to 1.19.3. -* plugins: Mounts can no longer be pinned to a specific _builtin_ version. 
Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] - -IMPROVEMENTS: - -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] -* storage/raft: Add `retry_join_as_non_voter` config option. [[GH-18030](https://github.com/hashicorp/vault/pull/18030)] - -BUG FIXES: - -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* plugins: Only report deprecation status for builtin plugins. 
[[GH-17816](https://github.com/hashicorp/vault/pull/17816)] -* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18086](https://github.com/hashicorp/vault/pull/18086)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18111](https://github.com/hashicorp/vault/pull/18111)] -* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - -## 1.12.1 -### November 2, 2022 - -IMPROVEMENTS: - -* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] -* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] -* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] - -BUG FIXES: - -* cli: Remove empty table heading for `vault secrets list -detailed` output. 
[[GH-17577](https://github.com/hashicorp/vault/pull/17577)] -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility -* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] -* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. -* kmip (enterprise): Fix selection of Cryptographic Parameters for Encrypt/Decrypt operations. -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] - ## 1.12.0 -### October 13, 2022 - -SECURITY: - -* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. 
[[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] +### Unreleased CHANGES: -* api: Exclusively use `GET /sys/plugins/catalog` endpoint for listing plugins, and add `details` field to list responses. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] -* auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] -* auth: `auth enable` returns an error and `POST /sys/auth/:type` endpoint reports an error for `Pending Removal` auth methods. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] * core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted [[GH-16539](https://github.com/hashicorp/vault/pull/16539)] -* core: Bump Go version to 1.19.2. +* core: Bump Go version to 1.18.5. * core: Validate input parameters for vault operator init command. Vault 1.12 CLI version is needed to run operator init now. [[GH-16379](https://github.com/hashicorp/vault/pull/16379)] * identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 [[GH-15912](https://github.com/hashicorp/vault/pull/15912)] -* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades will not be allowed if the license expiration time is before the build date of the binary. 
-* plugins: Add plugin version to auth register, list, and mount table [[GH-16856](https://github.com/hashicorp/vault/pull/16856)] -* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] +* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades +will not be allowed if the license termination time is before the build date of the binary. * plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] * plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* plugins: `plugin info` displays deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `plugin list` now accepts a `-detailed` flag, which displays deprecation status and version info. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* secrets/azure: Removed deprecated AAD graph API support from the secrets engine. [[GH-17180](https://github.com/hashicorp/vault/pull/17180)] -* secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. [[GH-17038](https://github.com/hashicorp/vault/pull/17038)] -* secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. 
[[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] -* secrets: `secrets enable` returns an error and `POST /sys/mount/:type` endpoint reports an error for `Pending Removal` secrets engines. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] FEATURES: -* **GCP Cloud KMS support for managed keys**: Managed keys now support using GCP Cloud KMS keys -* **LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out functionality for all supported schemas. [[GH-17152](https://github.com/hashicorp/vault/pull/17152)] -* **OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)] -* **Redis DB Engine**: Adding the new Redis database engine that supports the generation of static and dynamic user roles and root credential rotation on a stand alone Redis server. [[GH-17070](https://github.com/hashicorp/vault/pull/17070)] -* **Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. 
[[GH-17075](https://github.com/hashicorp/vault/pull/17075)] * **Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process [[GH-14946](https://github.com/hashicorp/vault/pull/14946)] -* **Transform Key Import (BYOK)**: The transform secrets engine now supports importing keys for tokenization and FPE transformations -* HCP (enterprise): Adding foundational support for self-managed vault nodes to securely communicate with [HashiCorp Cloud Platform](https://cloud.hashicorp.com) as an opt-in feature +* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)] +* secrets/pki: Add an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for +a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)] * ui: UI support for Okta Number Challenge. [[GH-15998](https://github.com/hashicorp/vault/pull/15998)] -* **Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified. IMPROVEMENTS: -* core/managed-keys (enterprise): Allow operators to specify PSS signatures and/or hash algorithm for the test/sign api * activity (enterprise): Added new clients unit tests to test accuracy of estimates -* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] * agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] * agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. 
[[GH-16479](https://github.com/hashicorp/vault/pull/16479)] * agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. [[GH-11969](https://github.com/hashicorp/vault/pull/11969)] * agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] * api/mfa: Add namespace path to the MFA read/list endpoint [[GH-16911](https://github.com/hashicorp/vault/pull/16911)] * api: Add a sentinel error for missing KV secrets [[GH-16699](https://github.com/hashicorp/vault/pull/16699)] -* auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. [[GH-17251](https://github.com/hashicorp/vault/pull/17251)] -* auth/approle: SecretIDs can now be generated with an per-request specified TTL and num_uses. -When either the ttl and num_uses fields are not specified, the role's configuration is used. [[GH-14474](https://github.com/hashicorp/vault/pull/14474)] * auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 [[GH-16455](https://github.com/hashicorp/vault/pull/16455)] -* auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. [[GH-17194](https://github.com/hashicorp/vault/pull/17194)] * auth/cert: Add metadata to identity-alias [[GH-14751](https://github.com/hashicorp/vault/pull/14751)] -* auth/cert: Operators can now specify a CRL distribution point URL, in which case the cert auth engine will fetch and use the CRL from that location rather than needing to push CRLs directly to auth/cert. [[GH-17136](https://github.com/hashicorp/vault/pull/17136)] -* auth/cf: Enables CF roles to be compatible with Vault's role based quotas. [[GH-17196](https://github.com/hashicorp/vault/pull/17196)] * auth/gcp: Add support for GCE regional instance groups [[GH-16435](https://github.com/hashicorp/vault/pull/16435)] -* auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. 
[[GH-17160](https://github.com/hashicorp/vault/pull/17160)] * auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] * auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] * auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] -* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the Kerberos config in Vault. This removes any instance names found in the keytab service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] -* auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)] -* auth/oci: Add support for role resolution. [[GH-17212](https://github.com/hashicorp/vault/pull/17212)] +* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the +Kerberos config in Vault. This removes any instance names found in the keytab +service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] * auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. [[GH-16274](https://github.com/hashicorp/vault/pull/16274)] * cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. [[GH-16441](https://github.com/hashicorp/vault/pull/16441)] -* cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* cli: `vault plugin list` now has a `details` field in JSON format, and version and type information in table format. 
[[GH-17347](https://github.com/hashicorp/vault/pull/17347)] * command/audit: Improve missing type error message [[GH-16409](https://github.com/hashicorp/vault/pull/16409)] * command/server: add `-dev-tls` and `-dev-tls-cert-dir` subcommands to create a Vault dev server with generated certificates and private key. [[GH-16421](https://github.com/hashicorp/vault/pull/16421)] -* command: Fix shell completion for KV v2 mounts [[GH-16553](https://github.com/hashicorp/vault/pull/16553)] * core (enterprise): Add HTTP PATCH support for namespaces with an associated `namespace patch` CLI command * core (enterprise): Add check to `vault server` command to ensure configured storage backend is supported. * core (enterprise): Add custom metadata support for namespaces @@ -909,9 +52,7 @@ When either the ttl and num_uses fields are not specified, the role's configurat * core/quotas (enterprise): Added ability to add role information for lease-count resource quotas, to limit login requests on auth mounts made using that role * core/quotas: Added ability to add path suffixes for rate-limit resource quotas [[GH-15989](https://github.com/hashicorp/vault/pull/15989)] * core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role [[GH-16115](https://github.com/hashicorp/vault/pull/16115)] -* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)] * core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* core: Handle and log deprecated builtin mounts. Introduces `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when attempting to mount `Pending Removal` builtin plugins. 
[[GH-17005](https://github.com/hashicorp/vault/pull/17005)] * core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)] * core: Upgrade github.com/hashicorp/raft [[GH-16609](https://github.com/hashicorp/vault/pull/16609)] * core: remove gox [[GH-16353](https://github.com/hashicorp/vault/pull/16353)] @@ -921,12 +62,8 @@ When either the ttl and num_uses fields are not specified, the role's configurat * identity/oidc: allows filtering the list providers response by an allowed_client_id [[GH-16181](https://github.com/hashicorp/vault/pull/16181)] * identity: Prevent possibility of data races on entity creation. [[GH-16487](https://github.com/hashicorp/vault/pull/16487)] * physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. [[GH-15866](https://github.com/hashicorp/vault/pull/15866)] -* plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins [[GH-16995](https://github.com/hashicorp/vault/pull/16995)] * plugins: Add Deprecation Status method to builtinregistry. [[GH-16846](https://github.com/hashicorp/vault/pull/16846)] -* plugins: Added environment variable flag to opt-out specific plugins from multiplexing [[GH-16972](https://github.com/hashicorp/vault/pull/16972)] -* plugins: Adding version to plugin GRPC interface [[GH-17088](https://github.com/hashicorp/vault/pull/17088)] * plugins: Plugin catalog supports registering and managing plugins with semantic version information. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* replication (enterprise): Fix race in merkle sync that can prevent streaming by returning key value matching provided hash if found in log shipper buffer. 
* secret/nomad: allow reading CA and client auth certificate from /nomad/config/access [[GH-15809](https://github.com/hashicorp/vault/pull/15809)] * secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs [[GH-16519](https://github.com/hashicorp/vault/pull/16519)] * secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints [[GH-16124](https://github.com/hashicorp/vault/pull/16124)] @@ -934,21 +71,11 @@ When either the ttl and num_uses fields are not specified, the role's configurat * secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. [[GH-16494](https://github.com/hashicorp/vault/pull/16494)] * secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). [[GH-15742](https://github.com/hashicorp/vault/pull/15742)] * secrets/ad: set config default length only if password_policy is missing [[GH-16140](https://github.com/hashicorp/vault/pull/16140)] -* secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. [[GH-17045](https://github.com/hashicorp/vault/pull/17045)] -* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)] -* secrets/database/snowflake: Add multiplexing support [[GH-17159](https://github.com/hashicorp/vault/pull/17159)] -* secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17174](https://github.com/hashicorp/vault/pull/17174)] -* secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. [[GH-17199](https://github.com/hashicorp/vault/pull/17199)] -* secrets/kubernetes: upgrade to v0.2.0 [[GH-17164](https://github.com/hashicorp/vault/pull/17164)] +* secrets/kubernetes: Add allowed_kubernetes_namespace_selector to allow selecting Kubernetes namespaces with a label selector when configuring roles. 
[[GH-16240](https://github.com/hashicorp/vault/pull/16240)] * secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. [[GH-16702](https://github.com/hashicorp/vault/pull/16702)] -* secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field [[GH-16935](https://github.com/hashicorp/vault/pull/16935)] -* secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL [[GH-17073](https://github.com/hashicorp/vault/pull/17073)] -* secrets/pki: Add ability to cancel tidy operations, control tidy resource usage. [[GH-16958](https://github.com/hashicorp/vault/pull/16958)] * secrets/pki: Add ability to periodically rebuild CRL before expiry [[GH-16762](https://github.com/hashicorp/vault/pull/16762)] * secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. [[GH-16900](https://github.com/hashicorp/vault/pull/16900)] * secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs [[GH-16563](https://github.com/hashicorp/vault/pull/16563)] -* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] -* secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. [[GH-16676](https://github.com/hashicorp/vault/pull/16676)] * secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). 
[[GH-16564](https://github.com/hashicorp/vault/pull/16564)] * secrets/pki: Allow revocation via proving possession of certificate's private key [[GH-16566](https://github.com/hashicorp/vault/pull/16566)] * secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance [[GH-16871](https://github.com/hashicorp/vault/pull/16871)] @@ -958,334 +85,75 @@ When either the ttl and num_uses fields are not specified, the role's configurat * secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. [[GH-16056](https://github.com/hashicorp/vault/pull/16056)] * secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] * secrets/ssh: Allow the use of Identity templates in the `default_user` field [[GH-16351](https://github.com/hashicorp/vault/pull/16351)] -* secrets/transit: Add a dedicated HMAC key type, which can be used with key import. [[GH-16668](https://github.com/hashicorp/vault/pull/16668)] -* secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to override the HTTP response code in case of partial user-input failures. [[GH-17118](https://github.com/hashicorp/vault/pull/17118)] -* secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. [[GH-16549](https://github.com/hashicorp/vault/pull/16549)] * ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs [[GH-15561](https://github.com/hashicorp/vault/pull/15561)] * storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. 
[[GH-10467](https://github.com/hashicorp/vault/pull/10467)] * storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] * ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. [[GH-15852](https://github.com/hashicorp/vault/pull/15852)] -* ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] * ui: Removed deprecated version of core-js 2.6.11 [[GH-15898](https://github.com/hashicorp/vault/pull/15898)] * ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. [[GH-16489](https://github.com/hashicorp/vault/pull/16489)] -* ui: Replaces non-inclusive terms [[GH-17116](https://github.com/hashicorp/vault/pull/17116)] * ui: redirect_to param forwards from auth route when authenticated [[GH-16821](https://github.com/hashicorp/vault/pull/16821)] * website/docs: API generate-recovery-token documentation. [[GH-16213](https://github.com/hashicorp/vault/pull/16213)] -* website/docs: Add documentation around the expensiveness of making lots of lease count quotas in a short period [[GH-16950](https://github.com/hashicorp/vault/pull/16950)] -* website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] * website/docs: Update replication docs to mention Integrated Storage [[GH-16063](https://github.com/hashicorp/vault/pull/16063)] -* website/docs: changed to echo for all string examples instead of (<<<) here-string. [[GH-9081](https://github.com/hashicorp/vault/pull/9081)] +* website/docs: changed to echo for all string examples instead of (<<<) here-string. 
[[GH-9081](https://github.com/hashicorp/vault/pull/9081)] BUG FIXES: * agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* agent: Agent will now respect `max_retries` retry configuration even when caching is set. [[GH-16970](https://github.com/hashicorp/vault/pull/16970)] * agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] * api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths [[GH-15835](https://github.com/hashicorp/vault/pull/15835)] * api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] * api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P.+) endpoints where it was not properly handling /auth/ [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] * api: properly handle switching to/from unix domain socket when changing client address [[GH-11904](https://github.com/hashicorp/vault/pull/11904)] -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] * auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] -* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)] -* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] * command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] * core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. 
[[GH-15583](https://github.com/hashicorp/vault/pull/15583)] * core (enterprise): Fix creation of duplicate entities via alias metadata changes on local auth mounts. * core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] * core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] * core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails -* core/managed-keys (enterprise): fix panic when having `cache_disable` true * core/quotas (enterprise): Fixed issue with improper counting of leases if lease count quota created after leases * core/quotas: Added globbing functionality on the end of path suffix quota paths [[GH-16386](https://github.com/hashicorp/vault/pull/16386)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] * core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty * core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* core: Fix panic when the plugin catalog returns neither a plugin nor an error. [[GH-17204](https://github.com/hashicorp/vault/pull/17204)] * core: Fixes parsing boolean values for ha_storage backends in config [[GH-15900](https://github.com/hashicorp/vault/pull/15900)] * core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. 
[[GH-16956](https://github.com/hashicorp/vault/pull/16956)] * database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] * debug: Fix panic when capturing debug bundle on Windows [[GH-14399](https://github.com/hashicorp/vault/pull/14399)] * debug: Remove extra empty lines from vault.log when debug command is run [[GH-16714](https://github.com/hashicorp/vault/pull/16714)] * identity (enterprise): Fix a data race when creating an entity for a local alias. -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] * identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the +Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] * identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] * openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] * plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] * plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] -* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. 
[[GH-17340](https://github.com/hashicorp/vault/pull/17340)] * quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read [[GH-15735](https://github.com/hashicorp/vault/pull/15735)] * replication (enterprise): Fix data race in SaveCheckpoint() * replication (enterprise): Fix data race in saveCheckpoint. -* replication (enterprise): Fix possible data race during merkle diff/sync * secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] * secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] * secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] * secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] * secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers [[GH-16865](https://github.com/hashicorp/vault/pull/16865)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] * secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] * secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] * secrets/pki: LIST issuers endpoint is now 
unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. * storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. * storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] * storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* storage/raft: Nodes no longer get demoted to nonvoter if we don't know their version due to missing heartbeats. [[GH-17019](https://github.com/hashicorp/vault/pull/17019)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] * ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] * ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] * ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] * ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] * ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). 
[[GH-16739](https://github.com/hashicorp/vault/pull/16739)] * ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear [[GH-15681](https://github.com/hashicorp/vault/pull/15681)] -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] * ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] * vault: Fix a bug where duplicate policies could be added to an identity group. [[GH-15638](https://github.com/hashicorp/vault/pull/15638)] -## 1.11.11 -### June 08, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.9. -* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] - -IMPROVEMENTS: - -* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when -`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] -* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] -* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] - -BUG FIXES: - -* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] -* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] -* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] -* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. -* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. 
-* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace -* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] -* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. -* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. -* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation - -## 1.11.10 -### April 26, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.8. - -IMPROVEMENTS: - -* cli/namespace: Add detailed flag to output additional namespace information -such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] -* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] -* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the -`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] -* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration -for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] - -BUG FIXES: - -* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] -* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. 
When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. -* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur -* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` -resulting in 412 errors. -* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] -* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] -* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] -* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil -* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. 
[[GH-20341](https://github.com/hashicorp/vault/pull/20341)] -* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens -* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] -* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] -* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] -* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] - -## 1.11.9 -### March 29, 2023 - -SECURITY: - -* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] -* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] -* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. 
An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] - -IMPROVEMENTS: - -* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id -website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] -* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch -option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] -* core: validate name identifiers in mssql physical storage backend prior use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] - -BUG FIXES: - -* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#190](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/190)] [[GH-19720](https://github.com/hashicorp/vault/pull/19720)] -* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] -* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. -* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. 
[[GH-19585](https://github.com/hashicorp/vault/pull/19585)] -* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] -* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions -* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] -* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] -* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] - -## 1.11.8 -### March 01, 2023 - -SECURITY: - -* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] - -CHANGES: - -* core: Bump Go version to 1.19.6. - -IMPROVEMENTS: - -* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] - -BUG FIXES: - -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time. 
-* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* license (enterprise): Fix bug where license would update even if the license didn't change. -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18208](https://github.com/hashicorp/vault/pull/18208)] -* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] -* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] - -## 1.11.7 -### February 6, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.4. - -IMPROVEMENTS: - -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. 
-* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] - -BUG FIXES: - -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. -* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. -* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* identity (enterprise): Fix a data race when creating an entity for a local alias. -* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. -* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. 
-* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. -* licensing (enterprise): update autoloaded license cache after reload -* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] -* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] -* storage/raft (enterprise): An already joined node can rejoin by wiping storage -and re-issueing a join request, but in doing so could transiently become a -non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] -* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. -* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] - -## 1.11.6 -### November 30, 2022 - -IMPROVEMENTS: - -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] - -BUG FIXES: - -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. 
-* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18085](https://github.com/hashicorp/vault/pull/18085)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18110](https://github.com/hashicorp/vault/pull/18110)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - -## 1.11.5 -### November 2, 2022 - -IMPROVEMENTS: - -* database/snowflake: Allow parallel requests to Snowflake [[GH-17594](https://github.com/hashicorp/vault/pull/17594)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] - -BUG FIXES: - -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): fix panic when having `cache_disable` true -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. 
-* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17384](https://github.com/hashicorp/vault/pull/17384)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] - -## 1.11.4 -### September 30, 2022 - -SECURITY: - -* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] - -IMPROVEMENTS: - -* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] -* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] - -BUG FIXES: - -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. 
[[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17162](https://github.com/hashicorp/vault/pull/17162)] -* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] - ## 1.11.3 ### August 31, 2022 -SECURITY: - -* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. 
[[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - CHANGES: * core: Bump Go version to 1.17.13. @@ -1321,11 +189,7 @@ Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/1660 * ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] * ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] * ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] - -SECURITY: - -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - + ## 1.11.2 ### August 2, 2022 @@ -1339,14 +203,10 @@ BUG FIXES: * core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] * secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] * ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] - + ## 1.11.1 ### July 21, 2022 -SECURITY: - -* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. 
This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] - CHANGES: * core: Bump Go version to 1.17.12. @@ -1370,11 +230,11 @@ BUG FIXES: * storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] * transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. * ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] - + SECURITY: * storage/raft (enterprise): Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] - + ## 1.11.0 ### June 20, 2022 @@ -1598,119 +458,9 @@ rebuilt upon changes to the list of issuers. 
[[GH-15179](https://github.com/hash * ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] * ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] -## 1.10.11 -### March 01, 2023 - -SECURITY: - -* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] - -CHANGES: - -* core: Bump Go version to 1.19.6. - -IMPROVEMENTS: - -* secrets/database: Adds error message requiring password on root crednetial rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] - -BUG FIXES: - -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. 
[[GH-18209](https://github.com/hashicorp/vault/pull/18209)] -* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] - -## 1.10.10 -### February 6, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.4. - -IMPROVEMENTS: - -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. -* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] - -BUG FIXES: - -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] -* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. -* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. 
-* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* identity (enterprise): Fix a data race when creating an entity for a local alias. -* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. -* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. -* licensing (enterprise): update autoloaded license cache after reload -* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] - -## 1.10.9 -### November 30, 2022 - -BUG FIXES: - -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. 
[[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18084](https://github.com/hashicorp/vault/pull/18084)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18109](https://github.com/hashicorp/vault/pull/18109)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - -## 1.10.8 -### November 2, 2022 - -BUG FIXES: - -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): fix panic when having `cache_disable` true -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] - -## 1.10.7 -### September 30, 2022 - -SECURITY: - -* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. 
[[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] - -BUG FIXES: - -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] - ## 1.10.6 ### August 31, 2022 -SECURITY: - -* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - CHANGES: * core: Bump Go version to 1.17.13. @@ -1736,18 +486,10 @@ Authorization Endpoint. 
[[GH-16601](https://github.com/hashicorp/vault/pull/1660 * ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] * ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] * ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] - -SECURITY: - -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] ## 1.10.5 ### July 21, 2022 -SECURITY: - -* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] - CHANGES: * core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] @@ -2102,26 +844,9 @@ operation for upgraded configurations with a `root_password_ttl` of zero. 
[[GH-1 * ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] * ui: trigger token renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] -## 1.9.10 -### September 30, 2022 - -SECURITY: - -* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] - -BUG FIXES: - -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] - ## 1.9.9 ### August 31, 2022 -SECURITY: - -* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - CHANGES: * core: Bump Go version to 1.17.13. 
@@ -2135,17 +860,9 @@ BUG FIXES: * ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] * ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] -SECURITY: - -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - ## 1.9.8 ### July 21, 2022 -SECURITY: - -* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] - CHANGES: * core: Bump Go version to 1.17.12. 
diff --git a/CODEOWNERS b/CODEOWNERS index e13a6b5398222..745562e547280 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -25,12 +25,12 @@ /plugins/ @hashicorp/vault-ecosystem /vault/plugin_catalog.go @hashicorp/vault-ecosystem -/website/content/ @hashicorp/vault-education-approvers -/website/content/docs/plugin-portal.mdx @acahn @hashicorp/vault-education-approvers +/website/content/ @taoism4504 +/website/content/docs/plugin-portal.mdx @taoism4504 @acahn # Plugin docs -/website/content/docs/plugins/ @fairclothjm @hashicorp/vault-education-approvers -/website/content/docs/upgrading/plugins.mdx @fairclothjm @hashicorp/vault-education-approvers +/website/content/docs/plugins/ @taoism4504 @fairclothjm +/website/content/docs/upgrading/plugins.mdx @taoism4504 @fairclothjm # UI code related to Vault's JWT/OIDC auth method and OIDC provider. # Changes to these files often require coordination with backend code, @@ -45,40 +45,3 @@ # Quality engineering /.github/ @hashicorp/quality-team /enos/ @hashicorp/quality-team - -# Cryptosec -/builtin/logical/pki/ @hashicorp/vault-crypto -/builtin/logical/pkiext/ @hashicorp/vault-crypto -/website/content/docs/secrets/pki/ @hashicorp/vault-crypto -/website/content/api-docs/secret/pki.mdx @hashicorp/vault-crypto -/builtin/credential/cert/ @hashicorp/vault-crypto -/website/content/docs/auth/cert.mdx @hashicorp/vault-crypto -/website/content/api-docs/auth/cert.mdx @hashicorp/vault-crypto -/builtin/logical/ssh/ @hashicorp/vault-crypto -/website/content/docs/secrets/ssh/ @hashicorp/vault-crypto -/website/content/api-docs/secret/ssh.mdx @hashicorp/vault-crypto -/builtin/logical/transit/ @hashicorp/vault-crypto -/website/content/docs/secrets/transit/ @hashicorp/vault-crypto -/website/content/api-docs/secret/transit.mdx @hashicorp/vault-crypto -/helper/random/ @hashicorp/vault-crypto -/sdk/helper/certutil/ @hashicorp/vault-crypto -/sdk/helper/cryptoutil/ @hashicorp/vault-crypto -/sdk/helper/kdf/ @hashicorp/vault-crypto -/sdk/helper/keysutil/ 
@hashicorp/vault-crypto -/sdk/helper/ocsp/ @hashicorp/vault-crypto -/sdk/helper/salt/ @hashicorp/vault-crypto -/sdk/helper/tlsutil/ @hashicorp/vault-crypto -/shamir/ @hashicorp/vault-crypto -/vault/barrier* @hashicorp/vault-crypto -/vault/managed_key* @hashicorp/vault-crypto -/vault/seal* @hashicorp/vault-crypto -/vault/seal/ @hashicorp/vault-crypto -/website/content/docs/configuration/seal/ @hashicorp/vault-crypto -/website/content/docs/enterprise/sealwrap.mdx @hashicorp/vault-crypto -/website/content/api-docs/system/sealwrap-rewrap.mdx @hashicorp/vault-crypto -/website/content/docs/secrets/transform/ @hashicorp/vault-crypto -/website/content/api-docs/secret/transform.mdx @hashicorp/vault-crypto -/website/content/docs/secrets/kmip-profiles.mdx @hashicorp/vault-crypto -/website/content/docs/secrets/kmip.mdx @hashicorp/vault-crypto -/website/content/api-docs/secret/kmip.mdx @hashicorp/vault-crypto -/website/content/docs/enterprise/fips/ @hashicorp/vault-crypto diff --git a/Dockerfile b/Dockerfile index d088623b834ba..5d4ce5f581999 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - ## DOCKERHUB DOCKERFILE ## FROM alpine:3.15 as default diff --git a/LICENSE b/LICENSE index f4f97ee5853a2..e87a115e462e1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,5 +1,3 @@ -Copyright (c) 2015 HashiCorp, Inc. - Mozilla Public License, version 2.0 1. Definitions diff --git a/Makefile b/Makefile index 82fed7a838830..38a305a442855 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor) SED?=$(shell command -v gsed || command -v sed) GO_VERSION_MIN=$$(cat $(CURDIR)/.go-version) -PROTOC_VERSION_MIN=3.21.12 +PROTOC_VERSION_MIN=3.21.5 GO_CMD?=go CGO_ENABLED?=0 ifneq ($(FDB_ENABLED), ) @@ -111,49 +111,6 @@ vet: echo "and fix them if necessary before submitting the code for reviewal."; \ fi -# deprecations runs staticcheck tool to look for deprecations. 
Checks entire code to see if it -# has deprecated function, variable, constant or field -deprecations: - make bootstrap - repositoryName=$(basename `git rev-parse --show-toplevel`) - ./scripts/deprecations-checker.sh "" repositoryName - -# ci-deprecations runs staticcheck tool to look for deprecations. All output gets piped to revgrep -# which will only return an error if changes that is not on main has deprecated function, variable, constant or field -ci-deprecations: - make bootstrap - repositoryName=$(basename `git rev-parse --show-toplevel`) - ./scripts/deprecations-checker.sh main repositoryName - -# tools/godoctests/.bin/godoctests builds the custom analyzer to check for godocs for tests -tools/godoctests/.bin/godoctests: - @cd tools/godoctests && $(GO_CMD) build -o .bin/godoctests . - -# vet-godoctests runs godoctests on the test functions. All output gets piped to revgrep -# which will only return an error if a new function is missing a godoc -vet-godoctests: bootstrap tools/godoctests/.bin/godoctests - @$(GO_CMD) vet -vettool=./tools/godoctests/.bin/godoctests $(TEST) 2>&1 | revgrep - -# ci-vet-godoctests runs godoctests on the test functions. All output gets piped to revgrep -# which will only return an error if a new function that is not on main is missing a godoc -ci-vet-godoctests: ci-bootstrap tools/godoctests/.bin/godoctests - @$(GO_CMD) vet -vettool=./tools/godoctests/.bin/godoctests $(TEST) 2>&1 | revgrep origin/main - -# tools/gonilnilfunctions/.bin/gonilnilfunctions builds the custom analyzer to check for nil, nil function returns -tools/gonilnilfunctions/.bin/gonilnilfunctions: - @cd tools/gonilnilfunctions && $(GO_CMD) build -o .bin/gonilnilfunctions . - -# vet-gonilnilfunctions runs gonilnilfunctions on functions. 
All output gets piped to revgrep -# which will only return an error if a new function returns nil, nil (where one of the nils could be an error) -vet-gonilnilfunctions: bootstrap tools/gonilnilfunctions/.bin/gonilnilfunctions - @$(GO_CMD) vet -vettool=./tools/gonilnilfunctions/.bin/gonilnilfunctions ./... 2>&1 | revgrep - -# ci-vet-gonilnilfunctions runs gonilnilfunctions on functions. All output gets piped to revgrep -# which will only return an error if a new function that is not on main has an issue -ci-vet-gonilnilfunctions: ci-bootstrap tools/gonilnilfunctions/.bin/gonilnilfunctions - @$(GO_CMD) vet -vettool=./tools/gonilnilfunctions/.bin/gonilnilfunctions ./... 2>&1 | revgrep origin/main - - # lint runs vet plus a number of other checkers, it is more comprehensive, but louder lint: @$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \ @@ -194,7 +151,7 @@ static-assets-dir: install-ui-dependencies: @echo "--> Installing JavaScript assets" - @cd ui && yarn + @cd ui && yarn --ignore-optional test-ember: install-ui-dependencies @echo "--> Running ember tests" @@ -244,8 +201,8 @@ proto: bootstrap # No additional sed expressions should be added to this list. Going forward # we should just use the variable names choosen by protobuf. These are left # here for backwards compatability, namely for SDK compilation. 
- $(SED) -i -e 's/Id/ID/' -e 's/SPDX-License-IDentifier/SPDX-License-Identifier/' vault/request_forwarding_service.pb.go - $(SED) -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' -e 's/SPDX-License-IDentifier/SPDX-License-Identifier/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go + $(SED) -i -e 's/Id/ID/' vault/request_forwarding_service.pb.go + $(SED) -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go # This will inject the sentinel struct tags as decorated in the proto files. 
protoc-go-inject-tag -input=./helper/identity/types.pb.go @@ -296,7 +253,7 @@ hana-database-plugin: mongodb-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin -.PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-godoctests ci-vet-godoctests vet-gonilnilfunctions ci-vet-gonilnilfunctions +.PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-godoctests ci-vet-godoctests .NOTPARALLEL: ember-dist ember-dist-dev diff --git a/api/api_test.go b/api/api_test.go index 8bf69e0de97a6..e4ba3153203eb 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/auth.go b/api/auth.go index c1ef7a77989d5..fa92de4b3fd33 100644 --- a/api/auth.go +++ b/api/auth.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -66,7 +63,7 @@ func (a *Auth) MFAValidate(ctx context.Context, mfaSecret *Secret, payload map[s return nil, fmt.Errorf("secret does not contain MFARequirements") } - s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.MFARequestID, payload) + s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.GetMFARequestID(), payload) if err != nil { return nil, err } diff --git a/api/auth/approle/approle.go b/api/auth/approle/approle.go index 10d26b610f42e..b8cf012284418 100644 --- a/api/auth/approle/approle.go +++ b/api/auth/approle/approle.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package approle import ( diff --git a/api/auth/approle/approle_test.go b/api/auth/approle/approle_test.go index cdfb4e285e792..f2628c695cc27 100644 --- a/api/auth/approle/approle_test.go +++ b/api/auth/approle/approle_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package approle import ( diff --git a/api/auth/approle/go.mod b/api/auth/approle/go.mod index a1d1f47851477..b3d871b887ad6 100644 --- a/api/auth/approle/go.mod +++ b/api/auth/approle/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/approle go 1.16 -require github.com/hashicorp/vault/api v1.9.2 +require github.com/hashicorp/vault/api v1.8.0 diff --git a/api/auth/approle/go.sum b/api/auth/approle/go.sum index dfd66b2ccb247..c7835a3bf723f 100644 --- a/api/auth/approle/go.sum +++ b/api/auth/approle/go.sum @@ -1,46 +1,160 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= 
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod 
h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= 
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= -github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= +github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= +github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -50,68 +164,169 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete 
v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 
h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= 
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text 
v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/api/auth/aws/aws.go b/api/auth/aws/aws.go index f2aa9be1d00c4..44a4f6db1f584 100644 --- a/api/auth/aws/aws.go +++ b/api/auth/aws/aws.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package aws import ( diff --git a/api/auth/aws/go.mod b/api/auth/aws/go.mod index 86261e766a14b..6ccdb0f4e1dde 100644 --- a/api/auth/aws/go.mod +++ b/api/auth/aws/go.mod @@ -7,5 +7,5 @@ require ( github.com/hashicorp/go-hclog v0.16.2 github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 github.com/hashicorp/go-uuid v1.0.2 - github.com/hashicorp/vault/api v1.9.2 + github.com/hashicorp/vault/api v1.8.0 ) diff --git a/api/auth/aws/go.sum b/api/auth/aws/go.sum index 4d30b11ac2787..0d29275b814c1 100644 --- a/api/auth/aws/go.sum +++ b/api/auth/aws/go.sum @@ -1,60 +1,167 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix 
v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.30.27 h1:9gPjZWVDSoQrBO2AvqrWObS6KAZByfEJxQoCYo4ZfK0= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= 
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf 
v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod 
h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA= github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= 
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= -github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= +github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= +github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -65,78 +172,177 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 
h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe 
v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/tv42/httpunix 
v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod 
h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf 
v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/api/auth/azure/azure.go b/api/auth/azure/azure.go index b682195701150..a09d15a147214 100644 --- a/api/auth/azure/azure.go +++ b/api/auth/azure/azure.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package azure import ( diff --git a/api/auth/azure/go.mod b/api/auth/azure/go.mod index 3a192f3e06263..becdcd90e12a0 100644 --- a/api/auth/azure/go.mod +++ b/api/auth/azure/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/azure go 1.16 -require github.com/hashicorp/vault/api v1.9.2 +require github.com/hashicorp/vault/api v1.8.0 diff --git a/api/auth/azure/go.sum b/api/auth/azure/go.sum index dfd66b2ccb247..c7835a3bf723f 100644 --- a/api/auth/azure/go.sum +++ b/api/auth/azure/go.sum @@ -1,46 +1,160 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf 
v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod 
h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= 
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.2 
h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= -github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= +github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= +github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -50,68 +164,169 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 
h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang 
v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto 
v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/api/auth/gcp/gcp.go b/api/auth/gcp/gcp.go index 2d6ef842a4b43..a5dd936461281 100644 --- a/api/auth/gcp/gcp.go +++ b/api/auth/gcp/gcp.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package gcp import ( diff --git a/api/auth/gcp/go.mod b/api/auth/gcp/go.mod index 7509753daf55e..cb122ae9cfe30 100644 --- a/api/auth/gcp/go.mod +++ b/api/auth/gcp/go.mod @@ -4,7 +4,6 @@ go 1.16 require ( cloud.google.com/go v0.97.0 - github.com/hashicorp/vault/api v1.9.2 + github.com/hashicorp/vault/api v1.8.0 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0 - google.golang.org/grpc v1.41.0 // indirect ) diff --git a/api/auth/gcp/go.sum b/api/auth/gcp/go.sum index 35da60ad89ecb..a9fda24c58efb 100644 --- a/api/auth/gcp/go.sum +++ b/api/auth/gcp/go.sum @@ -46,9 +46,21 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= @@ -58,12 +70,15 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -76,16 +91,28 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
+github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -119,6 +146,8 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -132,9 +161,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -163,40 +192,79 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts 
v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru 
v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= -github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= +github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= +github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -206,36 +274,74 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
+github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod 
h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -245,15 +351,16 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -289,9 +396,10 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -299,6 +407,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -324,11 +433,8 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -356,13 +462,15 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -406,14 +514,9 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365 h1:6wSTsvPddg9gc/mVEEyk9oOAoxn+bT4Z9q1zx+4RwA4= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -421,10 +524,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -481,7 +582,6 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -524,6 +624,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
+google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -580,6 +681,7 @@ google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0 h1:5Tbluzus3QxoAJx4IefGt1W0HQZW4nuMrVk684jI74Q= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -622,11 +724,18 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/api/auth/kubernetes/go.mod b/api/auth/kubernetes/go.mod index 107628276e5d8..0faeebfcc91aa 100644 --- a/api/auth/kubernetes/go.mod +++ b/api/auth/kubernetes/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/kubernetes go 1.16 -require github.com/hashicorp/vault/api v1.9.2 +require github.com/hashicorp/vault/api v1.8.0 diff --git a/api/auth/kubernetes/go.sum b/api/auth/kubernetes/go.sum index dfd66b2ccb247..c7835a3bf723f 100644 --- a/api/auth/kubernetes/go.sum +++ b/api/auth/kubernetes/go.sum @@ -1,46 +1,160 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs 
v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz 
v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 
h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= 
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= -github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= +github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= +github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -50,68 +164,169 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 
h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe 
v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 
v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/api/auth/kubernetes/kubernetes.go b/api/auth/kubernetes/kubernetes.go index f0e38c17a2b47..c2fef86a5fd0e 100644 --- a/api/auth/kubernetes/kubernetes.go +++ b/api/auth/kubernetes/kubernetes.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package kubernetes import ( diff --git a/api/auth/ldap/go.mod b/api/auth/ldap/go.mod index 4bfa972ebf0fb..15b683c2696ab 100644 --- a/api/auth/ldap/go.mod +++ b/api/auth/ldap/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/ldap go 1.16 -require github.com/hashicorp/vault/api v1.9.2 +require github.com/hashicorp/vault/api v1.8.0 diff --git a/api/auth/ldap/go.sum b/api/auth/ldap/go.sum index dfd66b2ccb247..c7835a3bf723f 100644 --- a/api/auth/ldap/go.sum +++ b/api/auth/ldap/go.sum @@ -1,46 +1,160 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf 
v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod 
h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= 
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.2 
h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= -github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= +github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= +github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -50,68 +164,169 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 
h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang 
v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto 
v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/api/auth/ldap/ldap.go b/api/auth/ldap/ldap.go index fdf1a38dd0c11..9f37abc664f7f 100644 --- a/api/auth/ldap/ldap.go +++ b/api/auth/ldap/ldap.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ldap import ( diff --git a/api/auth/ldap/ldap_test.go b/api/auth/ldap/ldap_test.go index abdccb0358356..8633c4dfac116 100644 --- a/api/auth/ldap/ldap_test.go +++ b/api/auth/ldap/ldap_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ldap import ( diff --git a/api/auth/userpass/go.mod b/api/auth/userpass/go.mod index 194bc6c952b71..5f907553bd35f 100644 --- a/api/auth/userpass/go.mod +++ b/api/auth/userpass/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/userpass go 1.16 -require github.com/hashicorp/vault/api v1.9.2 +require github.com/hashicorp/vault/api v1.8.0 diff --git a/api/auth/userpass/go.sum b/api/auth/userpass/go.sum index dfd66b2ccb247..c7835a3bf723f 100644 --- a/api/auth/userpass/go.sum +++ b/api/auth/userpass/go.sum @@ -1,46 +1,160 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty 
v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
+github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf 
v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod 
h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= -github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= +github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= +github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -50,68 +164,169 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod 
h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 
h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/api/auth/userpass/userpass.go b/api/auth/userpass/userpass.go index 3e8942953d3a8..124cd7a68f8e9 100644 --- a/api/auth/userpass/userpass.go +++ b/api/auth/userpass/userpass.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package userpass import ( diff --git a/api/auth/userpass/userpass_test.go b/api/auth/userpass/userpass_test.go index 4fe68d8d4ef12..0728117a1e8c4 100644 --- a/api/auth/userpass/userpass_test.go +++ b/api/auth/userpass/userpass_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package userpass import ( diff --git a/api/auth_test.go b/api/auth_test.go index ca69630cce5e9..46113d92f7beb 100644 --- a/api/auth_test.go +++ b/api/auth_test.go @@ -1,11 +1,10 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( "context" "testing" + + "github.com/hashicorp/vault/sdk/logical" ) type mockAuthMethod struct { @@ -92,7 +91,7 @@ func TestAuth_MFALoginTwoPhase(t *testing.T) { m: &mockAuthMethod{ mockedSecret: &Secret{ Auth: &SecretAuth{ - MFARequirement: &MFARequirement{ + MFARequirement: &logical.MFARequirement{ MFARequestID: "a-req-id", MFAConstraints: nil, }, diff --git a/api/auth_token.go b/api/auth_token.go index 1980be06ef5b9..52be1e7852b94 100644 --- a/api/auth_token.go +++ b/api/auth_token.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/client.go b/api/client.go index d20477e1d9d7b..c6843348e58e5 100644 --- a/api/client.go +++ b/api/client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -27,9 +24,12 @@ import ( "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/go-secure-stdlib/strutil" "golang.org/x/net/http2" "golang.org/x/time/rate" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" ) const ( @@ -56,19 +56,7 @@ const ( HeaderIndex = "X-Vault-Index" HeaderForward = "X-Vault-Forward" HeaderInconsistent = "X-Vault-Inconsistent" - - // NamespaceHeaderName is the header set to specify which namespace the - // request is indented for. - NamespaceHeaderName = "X-Vault-Namespace" - - // AuthHeaderName is the name of the header containing the token. - AuthHeaderName = "X-Vault-Token" - - // RequestHeaderName is the name of the header used by the Agent for - // SSRF protection. - RequestHeaderName = "X-Vault-Request" - - TLSErrorString = "This error usually means that the server is running with TLS disabled\n" + + TLSErrorString = "This error usually means that the server is running with TLS disabled\n" + "but the client is configured to use TLS. Please either enable TLS\n" + "on the server or run the client with -address set to an address\n" + "that uses the http protocol:\n\n" + @@ -126,11 +114,7 @@ type Config struct { // of three tries). MaxRetries int - // Timeout, given a non-negative value, will apply the request timeout - // to each request function unless an earlier deadline is passed to the - // request function through context.Context. Note that this timeout is - // not applicable to Logical().ReadRaw* (raw response) functions. - // Defaults to 60 seconds. 
+ // Timeout is for setting custom timeout parameter in the HttpClient Timeout time.Duration // If there is an error when creating the configuration, this will be the @@ -203,7 +187,6 @@ type Config struct { // commands such as 'vault operator raft snapshot' as this redirects to the // primary node. DisableRedirects bool - clientTLSConfig *tls.Config } // TLSConfig contains the parameters needed to configure TLS on the HTTP client @@ -338,17 +321,10 @@ func (c *Config) configureTLS(t *TLSConfig) error { if t.TLSServerName != "" { clientTLSConfig.ServerName = t.TLSServerName } - c.clientTLSConfig = clientTLSConfig return nil } -func (c *Config) TLSConfig() *tls.Config { - c.modifyLock.RLock() - defer c.modifyLock.RUnlock() - return c.clientTLSConfig.Clone() -} - // ConfigureTLS takes a set of TLS configurations and applies those to the // HTTP client. func (c *Config) ConfigureTLS(t *TLSConfig) error { @@ -641,7 +617,7 @@ func NewClient(c *Config) (*Client, error) { } // Add the VaultRequest SSRF protection header - client.headers[RequestHeaderName] = []string{"true"} + client.headers[consts.RequestHeaderName] = []string{"true"} if token := os.Getenv(EnvVaultToken); token != "" { client.token = token @@ -673,7 +649,6 @@ func (c *Client) CloneConfig() *Config { newConfig.CloneHeaders = c.config.CloneHeaders newConfig.CloneToken = c.config.CloneToken newConfig.ReadYourWrites = c.config.ReadYourWrites - newConfig.clientTLSConfig = c.config.clientTLSConfig // we specifically want a _copy_ of the client here, not a pointer to the original one newClient := *c.config.HttpClient @@ -959,7 +934,7 @@ func (c *Client) setNamespace(namespace string) { c.headers = make(http.Header) } - c.headers.Set(NamespaceHeaderName, namespace) + c.headers.Set(consts.NamespaceHeaderName, namespace) } // ClearNamespace removes the namespace header if set. 
@@ -967,7 +942,7 @@ func (c *Client) ClearNamespace() { c.modifyLock.Lock() defer c.modifyLock.Unlock() if c.headers != nil { - c.headers.Del(NamespaceHeaderName) + c.headers.Del(consts.NamespaceHeaderName) } } @@ -979,7 +954,7 @@ func (c *Client) Namespace() string { if c.headers == nil { return "" } - return c.headers.Get(NamespaceHeaderName) + return c.headers.Get(consts.NamespaceHeaderName) } // WithNamespace makes a shallow copy of Client, modifies it to use @@ -1280,9 +1255,8 @@ func (c *Client) NewRequest(method, requestPath string) *Request { // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. // -// Deprecated: RawRequest exists for historical compatibility and should not be -// used directly. Use client.Logical().ReadRaw(...) or higher level methods -// instead. +// Deprecated: This method should not be used directly. Use higher level +// methods instead. func (c *Client) RawRequest(r *Request) (*Response, error) { return c.RawRequestWithContext(context.Background(), r) } @@ -1291,9 +1265,8 @@ func (c *Client) RawRequest(r *Request) (*Response, error) { // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. // -// Deprecated: RawRequestWithContext exists for historical compatibility and -// should not be used directly. Use client.Logical().ReadRawWithContext(...) -// or higher level methods instead. +// Deprecated: This method should not be used directly. Use higher level +// methods instead. func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { // Note: we purposefully do not call cancel manually. 
The reason is // when canceled, the request.Body will EOF when reading due to the way @@ -1315,7 +1288,7 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon checkRetry := c.config.CheckRetry backoff := c.config.Backoff httpClient := c.config.HttpClient - ns := c.headers.Get(NamespaceHeaderName) + ns := c.headers.Get(consts.NamespaceHeaderName) outputCurlString := c.config.OutputCurlString outputPolicy := c.config.OutputPolicy logger := c.config.Logger @@ -1328,9 +1301,9 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon // e.g. calls using (*Client).WithNamespace switch ns { case "": - r.Headers.Del(NamespaceHeaderName) + r.Headers.Del(consts.NamespaceHeaderName) default: - r.Headers.Set(NamespaceHeaderName, ns) + r.Headers.Set(consts.NamespaceHeaderName, ns) } for _, cb := range c.requestCallbacks { @@ -1376,7 +1349,6 @@ START: LastOutputPolicyError = &OutputPolicyError{ method: req.Method, path: strings.TrimPrefix(req.URL.Path, "/v1"), - params: req.URL.Query(), } return nil, LastOutputPolicyError } @@ -1484,8 +1456,8 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo } } // explicitly set the namespace header to current client - if ns := c.headers.Get(NamespaceHeaderName); ns != "" { - r.Headers.Set(NamespaceHeaderName, ns) + if ns := c.headers.Get(consts.NamespaceHeaderName); ns != "" { + r.Headers.Set(consts.NamespaceHeaderName, ns) } } @@ -1506,7 +1478,7 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo req.Host = r.URL.Host if len(r.ClientToken) != 0 { - req.Header.Set(AuthHeaderName, r.ClientToken) + req.Header.Set(consts.AuthHeaderName, r.ClientToken) } if len(r.WrapTTL) != 0 { @@ -1696,13 +1668,7 @@ func MergeReplicationStates(old []string, new string) []string { return strutil.RemoveDuplicates(ret, false) } -type WALState struct { - ClusterID string - LocalIndex uint64 - ReplicatedIndex uint64 -} - -func 
ParseReplicationState(raw string, hmacKey []byte) (*WALState, error) { +func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error) { cooked, err := base64.StdEncoding.DecodeString(raw) if err != nil { return nil, err @@ -1740,7 +1706,7 @@ func ParseReplicationState(raw string, hmacKey []byte) (*WALState, error) { return nil, fmt.Errorf("invalid replicated index in state header: %w", err) } - return &WALState{ + return &logical.WALState{ ClusterID: pieces[1], LocalIndex: localIndex, ReplicatedIndex: replicatedIndex, diff --git a/api/client_test.go b/api/client_test.go index a23c0c19e7d58..4f57451753a08 100644 --- a/api/client_test.go +++ b/api/client_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -22,6 +19,7 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/consts" ) func init() { @@ -445,7 +443,7 @@ func TestClientDeprecatedEnvSettings(t *testing.T) { func TestClientEnvNamespace(t *testing.T) { var seenNamespace string handler := func(w http.ResponseWriter, req *http.Request) { - seenNamespace = req.Header.Get(NamespaceHeaderName) + seenNamespace = req.Header.Get(consts.NamespaceHeaderName) } config, ln := testHTTPServer(t, http.HandlerFunc(handler)) defer ln.Close() @@ -1270,7 +1268,7 @@ func TestClient_SetCloneToken(t *testing.T) { func TestClientWithNamespace(t *testing.T) { var ns string handler := func(w http.ResponseWriter, req *http.Request) { - ns = req.Header.Get(NamespaceHeaderName) + ns = req.Header.Get(consts.NamespaceHeaderName) } config, ln := testHTTPServer(t, http.HandlerFunc(handler)) defer ln.Close() diff --git a/api/go.mod b/api/go.mod index 20fb4617af23b..4627c5a5518ed 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,15 +1,11 @@ module github.com/hashicorp/vault/api -// The Go version directive for the api package should normally only be updated when -// code in the api package requires a 
newer Go version to build. It should not -// automatically track the Go version used to build Vault itself. Many projects import -// the api module and we don't want to impose a newer version on them any more than we -// have to. -go 1.19 +go 1.17 + +replace github.com/hashicorp/vault/sdk => ../sdk require ( github.com/cenkalti/backoff/v3 v3.0.0 - github.com/go-jose/go-jose/v3 v3.0.0 github.com/go-test/deep v1.0.2 github.com/hashicorp/errwrap v1.1.0 github.com/hashicorp/go-cleanhttp v0.5.2 @@ -18,22 +14,44 @@ require ( github.com/hashicorp/go-retryablehttp v0.6.6 github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 - github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 github.com/hashicorp/hcl v1.0.0 + github.com/hashicorp/vault/sdk v0.6.0 github.com/mitchellh/mapstructure v1.5.0 golang.org/x/net v0.7.0 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 + gopkg.in/square/go-jose.v2 v2.5.1 ) require ( + github.com/armon/go-metrics v0.3.9 // indirect + github.com/armon/go-radix v1.0.0 // indirect github.com/fatih/color v1.7.0 // indirect - github.com/google/go-cmp v0.5.7 // indirect + github.com/frankban/quicktest v1.13.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-plugin v1.4.3 // indirect + github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/go-version v1.2.0 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect github.com/mattn/go-colorable v0.1.6 // indirect github.com/mattn/go-isatty v0.0.12 // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect 
github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/ryanuber/go-glob v1.0.0 // indirect + go.uber.org/atomic v1.9.0 // indirect golang.org/x/crypto v0.6.0 // indirect golang.org/x/sys v0.5.0 // indirect golang.org/x/text v0.7.0 // indirect + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect + google.golang.org/grpc v1.41.0 // indirect + google.golang.org/protobuf v1.26.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index e8f5f1811f8f4..c81062b56dd13 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,44 +1,156 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= 
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= 
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 
h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= 
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -48,46 +160,188 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify 
v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/api/help.go b/api/help.go index c119f6c3c9535..0988ebcd1fc9d 100644 --- a/api/help.go +++ b/api/help.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/kv.go b/api/kv.go index 20862fbfdf1d9..37699df266f9f 100644 --- a/api/kv.go +++ b/api/kv.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import "errors" diff --git a/api/kv_test.go b/api/kv_test.go index 36d769feaa6b6..2c16b2d43ad66 100644 --- a/api/kv_test.go +++ b/api/kv_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -322,66 +319,13 @@ func TestExtractCustomMetadata(t *testing.T) { }, expected: map[string]interface{}{"org": "eng"}, }, - { - name: "a read response with no custom metadata from a pre-1.9 Vault server", - inputAPIResp: &Secret{ - Data: map[string]interface{}{ - "metadata": map[string]interface{}{}, - }, - }, - expected: map[string]interface{}(nil), - }, - { - name: "a write response with no custom metadata from a pre-1.9 Vault server", - inputAPIResp: &Secret{ - Data: map[string]interface{}{}, - }, - expected: map[string]interface{}(nil), - }, - { - name: "a read response with no custom metadata from a post-1.9 Vault server", - inputAPIResp: &Secret{ - Data: map[string]interface{}{ - "metadata": map[string]interface{}{ - "custom_metadata": nil, - }, - }, - }, - expected: map[string]interface{}(nil), - }, - { - name: "a write response with no custom metadata from a post-1.9 Vault server", - inputAPIResp: &Secret{ - Data: map[string]interface{}{ - "custom_metadata": nil, - }, - }, - expected: map[string]interface{}(nil), - }, - { - name: "a read response where custom metadata was deleted", - inputAPIResp: &Secret{ - Data: map[string]interface{}{ - "metadata": map[string]interface{}{ - "custom_metadata": map[string]interface{}{}, - }, - }, - }, - expected: map[string]interface{}{}, - }, - { - name: "a write response where custom metadata was deleted", - inputAPIResp: &Secret{ - Data: map[string]interface{}{ - "custom_metadata": map[string]interface{}{}, - }, - }, - expected: map[string]interface{}{}, - }, } for _, tc := range testCases { - cm := extractCustomMetadata(tc.inputAPIResp) + cm, err := extractCustomMetadata(tc.inputAPIResp) + if err != nil { + t.Fatalf("err: %s", err) + } if !reflect.DeepEqual(cm, tc.expected) { t.Fatalf("%s: got\n%#v\nexpected\n%#v\n", tc.name, cm, tc.expected) diff --git a/api/kv_v1.go b/api/kv_v1.go index a914e03576e86..22ba992384b79 100644 --- a/api/kv_v1.go +++ 
b/api/kv_v1.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/kv_v2.go b/api/kv_v2.go index 72c29eaa42612..7a98cfeefd234 100644 --- a/api/kv_v2.go +++ b/api/kv_v2.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -128,7 +125,11 @@ func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) { return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) } - kvSecret.CustomMetadata = extractCustomMetadata(secret) + cm, err := extractCustomMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToRead, err) + } + kvSecret.CustomMetadata = cm return kvSecret, nil } @@ -158,7 +159,11 @@ func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int) return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) } - kvSecret.CustomMetadata = extractCustomMetadata(secret) + cm, err := extractCustomMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToRead, err) + } + kvSecret.CustomMetadata = cm return kvSecret, nil } @@ -255,7 +260,11 @@ func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]inte Raw: secret, } - kvSecret.CustomMetadata = extractCustomMetadata(secret) + cm, err := extractCustomMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToWriteTo, err) + } + kvSecret.CustomMetadata = cm return kvSecret, nil } @@ -488,24 +497,30 @@ func (kv *KVv2) Rollback(ctx context.Context, secretPath string, toVersion int) return kvs, nil } -func extractCustomMetadata(secret *Secret) map[string]interface{} { +func extractCustomMetadata(secret *Secret) (map[string]interface{}, error) { // Logical Writes return the metadata directly, Reads return it nested inside the 
"metadata" key customMetadataInterface, ok := secret.Data["custom_metadata"] if !ok { - metadataInterface := secret.Data["metadata"] + metadataInterface, ok := secret.Data["metadata"] + if !ok { // if that's not found, bail since it should have had one or the other + return nil, fmt.Errorf("secret is missing expected fields") + } metadataMap, ok := metadataInterface.(map[string]interface{}) if !ok { - return nil + return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, metadataInterface) + } + customMetadataInterface, ok = metadataMap["custom_metadata"] + if !ok { + return nil, fmt.Errorf("metadata missing expected field \"custom_metadata\": %v", metadataMap) } - customMetadataInterface = metadataMap["custom_metadata"] } cm, ok := customMetadataInterface.(map[string]interface{}) - if !ok { - return nil + if !ok && customMetadataInterface != nil { + return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", customMetadataInterface, customMetadataInterface) } - return cm + return cm, nil } func extractDataAndVersionMetadata(secret *Secret) (*KVSecret, error) { @@ -709,7 +724,11 @@ func mergePatch(ctx context.Context, client *Client, mountPath string, secretPat Raw: secret, } - kvSecret.CustomMetadata = extractCustomMetadata(secret) + cm, err := extractCustomMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error reading custom metadata for secret %s: %w", secretPath, err) + } + kvSecret.CustomMetadata = cm return kvSecret, nil } diff --git a/api/lifetime_watcher.go b/api/lifetime_watcher.go index 5c060e5a15099..5f3eadbffdd88 100644 --- a/api/lifetime_watcher.go +++ b/api/lifetime_watcher.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -150,13 +147,6 @@ func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher, random := i.Rand if random == nil { - // NOTE: - // Rather than a cryptographically secure random number generator (RNG), - // the default behavior uses the math/rand package. The random number is - // used to introduce a slight jitter when calculating the grace period - // for a monitored secret monitoring. This is intended to stagger renewal - // requests to the Vault server, but in a semi-predictable way, so there - // is no need to use a cryptographically secure RNG. random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) } @@ -347,14 +337,24 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, var sleepDuration time.Duration - if errorBackoff == nil { - sleepDuration = r.calculateSleepDuration(remainingLeaseDuration, priorDuration) - } else if errorBackoff.NextBackOff() == backoff.Stop { - return err - } + if errorBackoff != nil { + sleepDuration = errorBackoff.NextBackOff() + if sleepDuration == backoff.Stop { + return err + } + } else { + // We keep evaluating a new grace period so long as the lease is + // extending. Once it stops extending, we've hit the max and need to + // rely on the grace duration. + if remainingLeaseDuration > priorDuration { + r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second) + } + priorDuration = remainingLeaseDuration - // remainingLeaseDuration becomes the priorDuration for the next loop - priorDuration = remainingLeaseDuration + // The sleep duration is set to 2/3 of the current lease duration plus + // 1/3 of the current grace period, which adds jitter. + sleepDuration = time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) + } // If we are within grace, return now; or, if the amount of time we // would sleep would land us in the grace period. 
This helps with short @@ -366,32 +366,15 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, return nil } - timer := time.NewTimer(sleepDuration) select { case <-r.stopCh: - timer.Stop() return nil - case <-timer.C: + case <-time.After(sleepDuration): continue } } } -// calculateSleepDuration calculates the amount of time the LifeTimeWatcher should sleep -// before re-entering its loop. -func (r *LifetimeWatcher) calculateSleepDuration(remainingLeaseDuration, priorDuration time.Duration) time.Duration { - // We keep evaluating a new grace period so long as the lease is - // extending. Once it stops extending, we've hit the max and need to - // rely on the grace duration. - if remainingLeaseDuration > priorDuration { - r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second) - } - - // The sleep duration is set to 2/3 of the current lease duration plus - // 1/3 of the current grace period, which adds jitter. - return time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) -} - // calculateGrace calculates the grace period based on the minimum of the // remaining lease duration and the token increment value; it also adds some // jitter to not have clients be in sync. diff --git a/api/logical.go b/api/logical.go index 927dd168e4404..747b9bc12c4c9 100644 --- a/api/logical.go +++ b/api/logical.go @@ -1,12 +1,8 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( "bytes" "context" - "encoding/json" "fmt" "io" "net/http" @@ -15,6 +11,7 @@ import ( "strings" "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" ) const ( @@ -68,54 +65,23 @@ func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - resp, err := c.readRawWithDataWithContext(ctx, path, data) - return c.ParseRawResponseAndCloseBody(resp, err) -} - -// ReadRaw attempts to read the value stored at the given Vault path -// (without '/v1/' prefix) and returns a raw *http.Response. -// -// Note: the raw-response functions do not respect the client-configured -// request timeout; if a timeout is desired, please use ReadRawWithContext -// instead and set the timeout through context.WithTimeout or context.WithDeadline. -func (c *Logical) ReadRaw(path string) (*Response, error) { - return c.ReadRawWithDataWithContext(context.Background(), path, nil) -} - -// ReadRawWithContext attempts to read the value stored at the give Vault path -// (without '/v1/' prefix) and returns a raw *http.Response. -// -// Note: the raw-response functions do not respect the client-configured -// request timeout; if a timeout is desired, please set it through -// context.WithTimeout or context.WithDeadline. -func (c *Logical) ReadRawWithContext(ctx context.Context, path string) (*Response, error) { - return c.ReadRawWithDataWithContext(ctx, path, nil) -} + r := c.c.NewRequest(http.MethodGet, "/v1/"+path) -// ReadRawWithData attempts to read the value stored at the given Vault -// path (without '/v1/' prefix) and returns a raw *http.Response. The 'data' map -// is added as query parameters to the request. 
-// -// Note: the raw-response functions do not respect the client-configured -// request timeout; if a timeout is desired, please use -// ReadRawWithDataWithContext instead and set the timeout through -// context.WithTimeout or context.WithDeadline. -func (c *Logical) ReadRawWithData(path string, data map[string][]string) (*Response, error) { - return c.ReadRawWithDataWithContext(context.Background(), path, data) -} + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } -// ReadRawWithDataWithContext attempts to read the value stored at the given -// Vault path (without '/v1/' prefix) and returns a raw *http.Response. The 'data' -// map is added as query parameters to the request. -// -// Note: the raw-response functions do not respect the client-configured -// request timeout; if a timeout is desired, please set it through -// context.WithTimeout or context.WithDeadline. -func (c *Logical) ReadRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { - return c.readRawWithDataWithContext(ctx, path, data) -} + if values != nil { + r.Params = values + } -func (c *Logical) ParseRawResponseAndCloseBody(resp *Response, err error) (*Secret, error) { + resp, err := c.c.rawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } @@ -140,26 +106,6 @@ func (c *Logical) ParseRawResponseAndCloseBody(resp *Response, err error) (*Secr return ParseSecret(resp.Body) } -func (c *Logical) readRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { - r := c.c.NewRequest(http.MethodGet, "/v1/"+path) - - var values url.Values - for k, v := range data { - if values == nil { - values = make(url.Values) - } - for _, val := range v { - values.Add(k, val) - } - } - - if values != nil { - r.Params = values - } - - return c.c.RawRequestWithContext(ctx, r) -} - func (c *Logical) 
List(path string) (*Secret, error) { return c.ListWithContext(context.Background(), path) } @@ -393,9 +339,7 @@ func (c *Logical) UnwrapWithContext(ctx context.Context, wrappingToken string) ( wrappedSecret := new(Secret) buf := bytes.NewBufferString(secret.Data["response"].(string)) - dec := json.NewDecoder(buf) - dec.UseNumber() - if err := dec.Decode(wrappedSecret); err != nil { + if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil { return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err) } diff --git a/api/output_policy.go b/api/output_policy.go index c3ec522891b50..85d1617e5e943 100644 --- a/api/output_policy.go +++ b/api/output_policy.go @@ -1,13 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( "fmt" "net/http" "net/url" - "strconv" "strings" ) @@ -20,7 +16,6 @@ var LastOutputPolicyError *OutputPolicyError type OutputPolicyError struct { method string path string - params url.Values finalHCLString string } @@ -49,22 +44,8 @@ func (d *OutputPolicyError) HCLString() (string, error) { // Builds a sample policy document from the request func (d *OutputPolicyError) buildSamplePolicy() (string, error) { - operation := d.method - // List is often defined as a URL param instead of as an http.Method - // this will check for the header and properly switch off of the intended functionality - if d.params.Has("list") { - isList, err := strconv.ParseBool(d.params.Get("list")) - if err != nil { - return "", fmt.Errorf("the value of the list url param is not a bool: %v", err) - } - - if isList { - operation = "LIST" - } - } - var capabilities []string - switch operation { + switch d.method { case http.MethodGet, "": capabilities = append(capabilities, "read") case http.MethodPost, http.MethodPut: @@ -78,15 +59,17 @@ func (d *OutputPolicyError) buildSamplePolicy() (string, error) { capabilities = append(capabilities, "list") } + // sanitize, then trim the Vault address and v1 from 
the front of the path + path, err := url.PathUnescape(d.path) + if err != nil { + return "", fmt.Errorf("failed to unescape request URL characters: %v", err) + } + // determine whether to add sudo capability - if IsSudoPath(d.path) { + if IsSudoPath(path) { capabilities = append(capabilities, "sudo") } - return formatOutputPolicy(d.path, capabilities), nil -} - -func formatOutputPolicy(path string, capabilities []string) string { // the OpenAPI response has a / in front of each path, // but policies need the path without that leading slash path = strings.TrimLeft(path, "/") @@ -95,5 +78,5 @@ func formatOutputPolicy(path string, capabilities []string) string { return fmt.Sprintf( `path "%s" { capabilities = ["%s"] -}`, path, capStr) +}`, path, capStr), nil } diff --git a/api/output_policy_test.go b/api/output_policy_test.go deleted file mode 100644 index 2092e2ba2a016..0000000000000 --- a/api/output_policy_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "net/http" - "net/url" - "testing" -) - -func TestBuildSamplePolicy(t *testing.T) { - t.Parallel() - - testCases := []struct { - name string - req *OutputPolicyError - expected string - err error - }{ - { - "happy path", - &OutputPolicyError{ - method: http.MethodGet, - path: "/something", - }, - formatOutputPolicy("/something", []string{"read"}), - nil, - }, - { // test included to clear up some confusion around the sanitize comment - "demonstrate that this function does not format fully", - &OutputPolicyError{ - method: http.MethodGet, - path: "http://vault.test/v1/something", - }, - formatOutputPolicy("http://vault.test/v1/something", []string{"read"}), - nil, - }, - { // test that list is properly returned - "list over read returned", - &OutputPolicyError{ - method: http.MethodGet, - path: "/something", - params: url.Values{ - "list": []string{"true"}, - }, - }, - formatOutputPolicy("/something", []string{"list"}), - nil, - 
}, - { - "valid protected path", - &OutputPolicyError{ - method: http.MethodGet, - path: "/sys/config/ui/headers/", - }, - formatOutputPolicy("/sys/config/ui/headers/", []string{"read", "sudo"}), - nil, - }, - { // ensure that a formatted path that trims the trailing slash as the code does still works for recognizing a sudo path - "valid protected path no trailing /", - &OutputPolicyError{ - method: http.MethodGet, - path: "/sys/config/ui/headers", - }, - formatOutputPolicy("/sys/config/ui/headers", []string{"read", "sudo"}), - nil, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result, err := tc.req.buildSamplePolicy() - if tc.err != err { - t.Fatalf("expected for the error to be %v instead got %v\n", tc.err, err) - } - - if tc.expected != result { - t.Fatalf("expected for the policy string to be %v instead got %v\n", tc.expected, result) - } - }) - } -} diff --git a/api/output_string.go b/api/output_string.go index d7777712d2095..80c591f20b5c7 100644 --- a/api/output_string.go +++ b/api/output_string.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/plugin_helpers.go b/api/plugin_helpers.go index 507b72c4c20de..2b1b35c3b593e 100644 --- a/api/plugin_helpers.go +++ b/api/plugin_helpers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -14,7 +11,7 @@ import ( "os" "regexp" - "github.com/go-jose/go-jose/v3/jwt" + squarejwt "gopkg.in/square/go-jose.v2/jwt" "github.com/hashicorp/errwrap" ) @@ -40,7 +37,7 @@ const ( // path matches that path or not (useful specifically for the paths that // contain templated fields.) 
var sudoPaths = map[string]*regexp.Regexp{ - "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/?$`), + "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`), "/pki/root": regexp.MustCompile(`^/pki/root$`), "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), "/sys/audit": regexp.MustCompile(`^/sys/audit$`), @@ -50,10 +47,10 @@ var sudoPaths = map[string]*regexp.Regexp{ "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), - "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/?$`), + "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/$`), "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), "/sys/leases": regexp.MustCompile(`^/sys/leases$`), - "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/?$`), + "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/$`), "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), @@ -66,14 +63,13 @@ var sudoPaths = map[string]*regexp.Regexp{ "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), - "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), // enterprise-only paths "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), "/sys/replication/performance/primary/secondary-token": 
regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), - "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/?$`), + "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`), "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), } @@ -135,7 +131,7 @@ func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) return func() (*tls.Config, error) { unwrapToken := os.Getenv(PluginUnwrapTokenEnv) - parsedJWT, err := jwt.ParseSigned(unwrapToken) + parsedJWT, err := squarejwt.ParseSigned(unwrapToken) if err != nil { return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) } diff --git a/api/plugin_helpers_test.go b/api/plugin_helpers_test.go index 7b3ddbf8154af..453720ea7a5aa 100644 --- a/api/plugin_helpers_test.go +++ b/api/plugin_helpers_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import "testing" diff --git a/api/plugin_types.go b/api/plugin_types.go deleted file mode 100644 index 4c759a2decc51..0000000000000 --- a/api/plugin_types.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -// NOTE: this file was copied from -// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types.go -// Any changes made should be made to both files at the same time. - -import "fmt" - -var PluginTypes = []PluginType{ - PluginTypeUnknown, - PluginTypeCredential, - PluginTypeDatabase, - PluginTypeSecrets, -} - -type PluginType uint32 - -// This is a list of PluginTypes used by Vault. 
-// If we need to add any in the future, it would -// be best to add them to the _end_ of the list below -// because they resolve to incrementing numbers, -// which may be saved in state somewhere. Thus if -// the name for one of those numbers changed because -// a value were added to the middle, that could cause -// the wrong plugin types to be read from storage -// for a given underlying number. Example of the problem -// here: https://play.golang.org/p/YAaPw5ww3er -const ( - PluginTypeUnknown PluginType = iota - PluginTypeCredential - PluginTypeDatabase - PluginTypeSecrets -) - -func (p PluginType) String() string { - switch p { - case PluginTypeUnknown: - return "unknown" - case PluginTypeCredential: - return "auth" - case PluginTypeDatabase: - return "database" - case PluginTypeSecrets: - return "secret" - default: - return "unsupported" - } -} - -func ParsePluginType(pluginType string) (PluginType, error) { - switch pluginType { - case "unknown": - return PluginTypeUnknown, nil - case "auth": - return PluginTypeCredential, nil - case "database": - return PluginTypeDatabase, nil - case "secret": - return PluginTypeSecrets, nil - default: - return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) - } -} diff --git a/api/renewer_test.go b/api/renewer_test.go index 7ba16e66eca24..3b28d8546d42d 100644 --- a/api/renewer_test.go +++ b/api/renewer_test.go @@ -1,15 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( "errors" "fmt" - "math/rand" - "reflect" "testing" - "testing/quick" "time" "github.com/go-test/deep" @@ -239,47 +233,3 @@ func TestLifetimeWatcher(t *testing.T) { }) } } - -// TestCalcSleepPeriod uses property based testing to evaluate the calculateSleepDuration -// function of LifeTimeWatchers, but also incidentally tests "calculateGrace". -// This is on account of "calculateSleepDuration" performing the "calculateGrace" -// function in particular instances. 
-// Both of these functions support the vital functionality of the LifeTimeWatcher -// and therefore should be tested rigorously. -func TestCalcSleepPeriod(t *testing.T) { - c := quick.Config{ - MaxCount: 10000, - Values: func(values []reflect.Value, r *rand.Rand) { - leaseDuration := r.Int63() - priorDuration := r.Int63n(leaseDuration) - remainingLeaseDuration := r.Int63n(priorDuration) - increment := r.Int63n(remainingLeaseDuration) - - values[0] = reflect.ValueOf(r) - values[1] = reflect.ValueOf(time.Duration(leaseDuration)) - values[2] = reflect.ValueOf(time.Duration(priorDuration)) - values[3] = reflect.ValueOf(time.Duration(remainingLeaseDuration)) - values[4] = reflect.ValueOf(time.Duration(increment)) - }, - } - - // tests that "calculateSleepDuration" will always return a value less than - // the remaining lease duration given a random leaseDuration, priorDuration, remainingLeaseDuration, and increment. - // Inputs are generated so that: - // leaseDuration > priorDuration > remainingLeaseDuration - // and remainingLeaseDuration > increment - if err := quick.Check(func(r *rand.Rand, leaseDuration, priorDuration, remainingLeaseDuration, increment time.Duration) bool { - lw := LifetimeWatcher{ - grace: 0, - increment: int(increment.Seconds()), - random: r, - } - - lw.calculateGrace(remainingLeaseDuration, increment) - - // ensure that we sleep for less than the remaining lease. - return lw.calculateSleepDuration(remainingLeaseDuration, priorDuration) < remainingLeaseDuration - }, &c); err != nil { - t.Error(err) - } -} diff --git a/api/replication_status.go b/api/replication_status.go deleted file mode 100644 index 1668daf19c127..0000000000000 --- a/api/replication_status.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/mitchellh/mapstructure" -) - -const ( - apiRepPerformanceStatusPath = "/v1/sys/replication/performance/status" - apiRepDRStatusPath = "/v1/sys/replication/dr/status" - apiRepStatusPath = "/v1/sys/replication/status" -) - -type ClusterInfo struct { - APIAddr string `json:"api_address,omitempty" mapstructure:"api_address"` - ClusterAddress string `json:"cluster_address,omitempty" mapstructure:"cluster_address"` - ConnectionStatus string `json:"connection_status,omitempty" mapstructure:"connection_status"` - LastHeartBeat string `json:"last_heartbeat,omitempty" mapstructure:"last_heartbeat"` - NodeID string `json:"node_id,omitempty" mapstructure:"node_id"` -} - -type ReplicationStatusGenericResponse struct { - LastDRWAL uint64 `json:"last_dr_wal,omitempty" mapstructure:"last_dr_wal"` - LastReindexEpoch string `json:"last_reindex_epoch,omitempty" mapstructure:"last_reindex_epoch"` - ClusterID string `json:"cluster_id,omitempty" mapstructure:"cluster_id"` - LastWAL uint64 `json:"last_wal,omitempty" mapstructure:"last_wal"` - MerkleRoot string `json:"merkle_root,omitempty" mapstructure:"merkle_root"` - Mode string `json:"mode,omitempty" mapstructure:"mode"` - PrimaryClusterAddr string `json:"primary_cluster_addr,omitempty" mapstructure:"primary_cluster_addr"` - LastPerformanceWAL uint64 `json:"last_performance_wal,omitempty" mapstructure:"last_performance_wal"` - State string `json:"state,omitempty" mapstructure:"state"` - LastRemoteWAL uint64 `json:"last_remote_wal,omitempty" mapstructure:"last_remote_wal"` - SecondaryID string `json:"secondary_id,omitempty" mapstructure:"secondary_id"` - SSCTGenerationCounter uint64 `json:"ssct_generation_counter,omitempty" mapstructure:"ssct_generation_counter"` - - KnownSecondaries []string `json:"known_secondaries,omitempty" mapstructure:"known_secondaries"` - KnownPrimaryClusterAddrs []string 
`json:"known_primary_cluster_addrs,omitempty" mapstructure:"known_primary_cluster_addrs"` - Primaries []ClusterInfo `json:"primaries,omitempty" mapstructure:"primaries"` - Secondaries []ClusterInfo `json:"secondaries,omitempty" mapstructure:"secondaries"` -} - -type ReplicationStatusResponse struct { - DR ReplicationStatusGenericResponse `json:"dr,omitempty" mapstructure:"dr"` - Performance ReplicationStatusGenericResponse `json:"performance,omitempty" mapstructure:"performance"` -} - -func (c *Sys) ReplicationStatus() (*ReplicationStatusResponse, error) { - return c.ReplicationStatusWithContext(context.Background(), apiRepStatusPath) -} - -func (c *Sys) ReplicationPerformanceStatusWithContext(ctx context.Context) (*ReplicationStatusGenericResponse, error) { - s, err := c.ReplicationStatusWithContext(ctx, apiRepPerformanceStatusPath) - if err != nil { - return nil, err - } - - return &s.Performance, nil -} - -func (c *Sys) ReplicationDRStatusWithContext(ctx context.Context) (*ReplicationStatusGenericResponse, error) { - s, err := c.ReplicationStatusWithContext(ctx, apiRepDRStatusPath) - if err != nil { - return nil, err - } - - return &s.DR, nil -} - -func (c *Sys) ReplicationStatusWithContext(ctx context.Context, path string) (*ReplicationStatusResponse, error) { - // default to replication/status - if path == "" { - path = apiRepStatusPath - } - - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodGet, path) - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err != nil { - return nil, err - } - defer func() { _ = resp.Body.Close() }() - - // First decode response into a map[string]interface{} - data := make(map[string]interface{}) - dec := json.NewDecoder(resp.Body) - dec.UseNumber() - if err := dec.Decode(&data); err != nil { - return nil, err - } - - rawData, ok := data["data"] - if !ok { - return nil, fmt.Errorf("empty data in replication status response") - } - - s := &ReplicationStatusResponse{} - g 
:= &ReplicationStatusGenericResponse{} - switch { - case path == apiRepPerformanceStatusPath: - err = mapstructure.Decode(rawData, g) - if err != nil { - return nil, err - } - s.Performance = *g - case path == apiRepDRStatusPath: - err = mapstructure.Decode(rawData, g) - if err != nil { - return nil, err - } - s.DR = *g - default: - err = mapstructure.Decode(rawData, s) - if err != nil { - return nil, err - } - return s, err - } - - return s, err -} diff --git a/api/request.go b/api/request.go index ecf783701ad4b..1cbbc62f908b1 100644 --- a/api/request.go +++ b/api/request.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -11,6 +8,8 @@ import ( "net/http" "net/url" + "github.com/hashicorp/vault/sdk/helper/consts" + retryablehttp "github.com/hashicorp/go-retryablehttp" ) @@ -128,7 +127,7 @@ func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { } if len(r.ClientToken) != 0 { - req.Header.Set(AuthHeaderName, r.ClientToken) + req.Header.Set(consts.AuthHeaderName, r.ClientToken) } if len(r.WrapTTL) != 0 { diff --git a/api/request_test.go b/api/request_test.go index ac21b80198728..f2657e61c5038 100644 --- a/api/request_test.go +++ b/api/request_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/response.go b/api/response.go index 2842c125514a4..9ce3d12aacca1 100644 --- a/api/response.go +++ b/api/response.go @@ -1,15 +1,14 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( "bytes" - "encoding/json" "fmt" "io" "io/ioutil" "net/http" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" ) // Response is a raw response that wraps an HTTP response. @@ -21,9 +20,7 @@ type Response struct { // will consume the response body, but will not close it. Close must // still be called. 
func (r *Response) DecodeJSON(out interface{}) error { - dec := json.NewDecoder(r.Body) - dec.UseNumber() - return dec.Decode(out) + return jsonutil.DecodeJSONFromReader(r.Body, out) } // Error returns an error response if there is one. If there is an error, @@ -45,7 +42,7 @@ func (r *Response) Error() error { r.Body.Close() r.Body = ioutil.NopCloser(bodyBuf) - ns := r.Header.Get(NamespaceHeaderName) + ns := r.Header.Get(consts.NamespaceHeaderName) // Build up the error object respErr := &ResponseError{ @@ -59,9 +56,7 @@ func (r *Response) Error() error { // in a bytes.Reader here so that the JSON decoder doesn't move the // read pointer for the original buffer. var resp ErrorResponse - dec := json.NewDecoder(bytes.NewReader(bodyBuf.Bytes())) - dec.UseNumber() - if err := dec.Decode(&resp); err != nil { + if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil { // Store the fact that we couldn't decode the errors respErr.RawError = true respErr.Errors = []string{bodyBuf.String()} diff --git a/api/secret.go b/api/secret.go index 3d15f7a806a6e..feb7dfdf96597 100644 --- a/api/secret.go +++ b/api/secret.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -14,6 +11,8 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" ) // Secret is the structure returned for every secret within Vault. 
@@ -284,22 +283,6 @@ type SecretWrapInfo struct { WrappedAccessor string `json:"wrapped_accessor"` } -type MFAMethodID struct { - Type string `json:"type,omitempty"` - ID string `json:"id,omitempty"` - UsesPasscode bool `json:"uses_passcode,omitempty"` - Name string `json:"name,omitempty"` -} - -type MFAConstraintAny struct { - Any []*MFAMethodID `json:"any,omitempty"` -} - -type MFARequirement struct { - MFARequestID string `json:"mfa_request_id,omitempty"` - MFAConstraints map[string]*MFAConstraintAny `json:"mfa_constraints,omitempty"` -} - // SecretAuth is the structure containing auth information if we have it. type SecretAuth struct { ClientToken string `json:"client_token"` @@ -314,7 +297,7 @@ type SecretAuth struct { LeaseDuration int `json:"lease_duration"` Renewable bool `json:"renewable"` - MFARequirement *MFARequirement `json:"mfa_requirement"` + MFARequirement *logical.MFARequirement `json:"mfa_requirement"` } // ParseSecret is used to parse a secret value from JSON from an io.Reader. @@ -340,18 +323,14 @@ func ParseSecret(r io.Reader) (*Secret, error) { // First decode the JSON into a map[string]interface{} var secret Secret - dec := json.NewDecoder(&buf) - dec.UseNumber() - if err := dec.Decode(&secret); err != nil { + if err := jsonutil.DecodeJSONFromReader(&buf, &secret); err != nil { return nil, err } // If the secret is null, add raw data to secret data if present if reflect.DeepEqual(secret, Secret{}) { data := make(map[string]interface{}) - dec := json.NewDecoder(&teebuf) - dec.UseNumber() - if err := dec.Decode(&data); err != nil { + if err := jsonutil.DecodeJSONFromReader(&teebuf, &data); err != nil { return nil, err } errRaw, errPresent := data["errors"] diff --git a/api/ssh.go b/api/ssh.go index 28510eecc23f3..b832e27482909 100644 --- a/api/ssh.go +++ b/api/ssh.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/ssh_agent.go b/api/ssh_agent.go index e61503772fa3c..03fe2bea53ed2 100644 --- a/api/ssh_agent.go +++ b/api/ssh_agent.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -18,6 +15,7 @@ import ( rootcerts "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/sdk/helper/hclutil" "github.com/mitchellh/mapstructure" ) @@ -171,7 +169,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { "tls_skip_verify", "tls_server_name", } - if err := CheckHCLKeys(list, valid); err != nil { + if err := hclutil.CheckHCLKeys(list, valid); err != nil { return nil, multierror.Prefix(err, "ssh_helper:") } @@ -187,33 +185,6 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { return &c, nil } -func CheckHCLKeys(node ast.Node, valid []string) error { - var list *ast.ObjectList - switch n := node.(type) { - case *ast.ObjectList: - list = n - case *ast.ObjectType: - list = n.List - default: - return fmt.Errorf("cannot check HCL keys of type %T", n) - } - - validMap := make(map[string]struct{}, len(valid)) - for _, v := range valid { - validMap[v] = struct{}{} - } - - var result error - for _, item := range list.Items { - key := item.Keys[0].Token.Value().(string) - if _, ok := validMap[key]; !ok { - result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line)) - } - } - - return result -} - // SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend // mounted at default path ("ssh"). func (c *Client) SSHHelper() *SSHHelper { diff --git a/api/ssh_agent_test.go b/api/ssh_agent_test.go index 38117e42a7062..d233b09c476cf 100644 --- a/api/ssh_agent_test.go +++ b/api/ssh_agent_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys.go b/api/sys.go index 81ebb3a2509f2..5fb111887c0d3 100644 --- a/api/sys.go +++ b/api/sys.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api // Sys is used to perform system-related operations on Vault. diff --git a/api/sys_audit.go b/api/sys_audit.go index 2244087aad588..82d9aab0b7a01 100644 --- a/api/sys_audit.go +++ b/api/sys_audit.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_auth.go b/api/sys_auth.go index e814412191f35..238bd5e468a02 100644 --- a/api/sys_auth.go +++ b/api/sys_auth.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_capabilities.go b/api/sys_capabilities.go index 6310d42fcf46c..af306a07f3126 100644 --- a/api/sys_capabilities.go +++ b/api/sys_capabilities.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_config_cors.go b/api/sys_config_cors.go index e80aa9d8b4175..1e2cda4f48cbf 100644 --- a/api/sys_config_cors.go +++ b/api/sys_config_cors.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_generate_root.go b/api/sys_generate_root.go index da4ad2f9b73ba..096cadb793d90 100644 --- a/api/sys_generate_root.go +++ b/api/sys_generate_root.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_hastatus.go b/api/sys_hastatus.go index 2b2aa7c3e9806..d89d59651a92f 100644 --- a/api/sys_hastatus.go +++ b/api/sys_hastatus.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_health.go b/api/sys_health.go index 13fd8d4d37432..953c1c21eaa3c 100644 --- a/api/sys_health.go +++ b/api/sys_health.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_init.go b/api/sys_init.go index 13fa94806976b..05dea86f6ab5c 100644 --- a/api/sys_init.go +++ b/api/sys_init.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_leader.go b/api/sys_leader.go index 868914d3b1394..a74e206ebed45 100644 --- a/api/sys_leader.go +++ b/api/sys_leader.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_leases.go b/api/sys_leases.go index c46f07e64b41f..c02402f5314c3 100644 --- a/api/sys_leases.go +++ b/api/sys_leases.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_mfa.go b/api/sys_mfa.go index 2be6695846486..a1ba1bd80f944 100644 --- a/api/sys_mfa.go +++ b/api/sys_mfa.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_monitor.go b/api/sys_monitor.go index 15a8a13d175c9..6813799f01415 100644 --- a/api/sys_monitor.go +++ b/api/sys_monitor.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -8,6 +5,8 @@ import ( "context" "fmt" "net/http" + + "github.com/hashicorp/vault/sdk/helper/logging" ) // Monitor returns a channel that outputs strings containing the log messages @@ -21,7 +20,7 @@ func (c *Sys) Monitor(ctx context.Context, logLevel string, logFormat string) (c r.Params.Add("log_level", logLevel) } - if logFormat == "" { + if logFormat == "" || logFormat == logging.UnspecifiedFormat.String() { r.Params.Add("log_format", "standard") } else { r.Params.Add("log_format", logFormat) diff --git a/api/sys_mounts.go b/api/sys_mounts.go index a6c2a0f5412e6..75173b4d8f2d8 100644 --- a/api/sys_mounts.go +++ b/api/sys_mounts.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -257,20 +254,20 @@ type MountInput struct { } type MountConfigInput struct { - Options map[string]string `json:"options" mapstructure:"options"` - DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` - Description *string `json:"description,omitempty" mapstructure:"description"` - MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` - AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` - TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` - AllowedManagedKeys []string 
`json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` - PluginVersion string `json:"plugin_version,omitempty"` - UserLockoutConfig *UserLockoutConfigInput `json:"user_lockout_config,omitempty"` + Options map[string]string `json:"options" mapstructure:"options"` + DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + Description *string `json:"description,omitempty" mapstructure:"description"` + MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + PluginVersion string `json:"plugin_version,omitempty"` + // Deprecated: This field will always be blank for newer server responses. 
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } @@ -292,35 +289,21 @@ type MountOutput struct { } type MountConfigOutput struct { - DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` - AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` - TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` - AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` - UserLockoutConfig *UserLockoutConfigOutput `json:"user_lockout_config,omitempty"` + DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string 
`json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + // Deprecated: This field will always be blank for newer server responses. PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } -type UserLockoutConfigInput struct { - LockoutThreshold string `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` - LockoutDuration string `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` - LockoutCounterResetDuration string `json:"lockout_counter_reset_duration,omitempty" structs:"lockout_counter_reset_duration" mapstructure:"lockout_counter_reset_duration"` - DisableLockout *bool `json:"lockout_disable,omitempty" structs:"lockout_disable" mapstructure:"lockout_disable"` -} - -type UserLockoutConfigOutput struct { - LockoutThreshold uint `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` - LockoutDuration int `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` - LockoutCounterReset int `json:"lockout_counter_reset,omitempty" structs:"lockout_counter_reset" mapstructure:"lockout_counter_reset"` - DisableLockout *bool `json:"disable_lockout,omitempty" structs:"disable_lockout" mapstructure:"disable_lockout"` -} - type MountMigrationOutput struct { MigrationID string `mapstructure:"migration_id"` } diff --git a/api/sys_mounts_test.go b/api/sys_mounts_test.go index a810c6268a1c7..d461a9d495cf8 100644 --- a/api/sys_mounts_test.go +++ b/api/sys_mounts_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_plugins.go b/api/sys_plugins.go index 2ee024d9defc8..989c78f1d5baa 100644 --- a/api/sys_plugins.go +++ b/api/sys_plugins.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -10,19 +7,20 @@ import ( "net/http" "time" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/mapstructure" ) // ListPluginsInput is used as input to the ListPlugins function. type ListPluginsInput struct { // Type of the plugin. Required. - Type PluginType `json:"type"` + Type consts.PluginType `json:"type"` } // ListPluginsResponse is the response from the ListPlugins call. type ListPluginsResponse struct { // PluginsByType is the list of plugins by type. - PluginsByType map[PluginType][]string `json:"types"` + PluginsByType map[consts.PluginType][]string `json:"types"` Details []PluginDetails `json:"details,omitempty"` @@ -70,11 +68,11 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) ( } result := &ListPluginsResponse{ - PluginsByType: make(map[PluginType][]string), + PluginsByType: make(map[consts.PluginType][]string), } switch i.Type { - case PluginTypeUnknown: - for _, pluginType := range PluginTypes { + case consts.PluginTypeUnknown: + for _, pluginType := range consts.PluginTypes { pluginsRaw, ok := secret.Data[pluginType.String()] if !ok { continue @@ -115,7 +113,7 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) ( } switch i.Type { - case PluginTypeUnknown: + case consts.PluginTypeUnknown: result.Details = details default: // Filter for just the queried type. @@ -135,8 +133,8 @@ type GetPluginInput struct { Name string `json:"-"` // Type of the plugin. Required. 
- Type PluginType `json:"type"` - Version string `json:"version"` + Type consts.PluginType `json:"type"` + Version string `json:"version"` } // GetPluginResponse is the response from the GetPlugin call. @@ -188,7 +186,7 @@ type RegisterPluginInput struct { Name string `json:"-"` // Type of the plugin. Required. - Type PluginType `json:"type"` + Type consts.PluginType `json:"type"` // Args is the list of args to spawn the process with. Args []string `json:"args,omitempty"` @@ -233,7 +231,7 @@ type DeregisterPluginInput struct { Name string `json:"-"` // Type of the plugin. Required. - Type PluginType `json:"type"` + Type consts.PluginType `json:"type"` // Version of the plugin. Optional. Version string `json:"version,omitempty"` @@ -370,11 +368,11 @@ func (c *Sys) ReloadPluginStatusWithContext(ctx context.Context, reloadStatusInp } // catalogPathByType is a helper to construct the proper API path by plugin type -func catalogPathByType(pluginType PluginType, name string) string { +func catalogPathByType(pluginType consts.PluginType, name string) string { path := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", pluginType, name) // Backwards compat, if type is not provided then use old path - if pluginType == PluginTypeUnknown { + if pluginType == consts.PluginTypeUnknown { path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", name) } diff --git a/api/sys_plugins_test.go b/api/sys_plugins_test.go index 3673181472a7d..b3b94d7302893 100644 --- a/api/sys_plugins_test.go +++ b/api/sys_plugins_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -10,7 +7,8 @@ import ( "reflect" "testing" - "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" ) func TestRegisterPlugin(t *testing.T) { @@ -45,32 +43,32 @@ func TestListPlugins(t *testing.T) { for name, tc := range map[string]struct { input ListPluginsInput - expectedPlugins map[PluginType][]string + expectedPlugins map[consts.PluginType][]string }{ "no type specified": { input: ListPluginsInput{}, - expectedPlugins: map[PluginType][]string{ - PluginTypeCredential: {"alicloud"}, - PluginTypeDatabase: {"cassandra-database-plugin"}, - PluginTypeSecrets: {"ad", "alicloud"}, + expectedPlugins: map[consts.PluginType][]string{ + consts.PluginTypeCredential: {"alicloud"}, + consts.PluginTypeDatabase: {"cassandra-database-plugin"}, + consts.PluginTypeSecrets: {"ad", "alicloud"}, }, }, "only auth plugins": { - input: ListPluginsInput{Type: PluginTypeCredential}, - expectedPlugins: map[PluginType][]string{ - PluginTypeCredential: {"alicloud"}, + input: ListPluginsInput{Type: consts.PluginTypeCredential}, + expectedPlugins: map[consts.PluginType][]string{ + consts.PluginTypeCredential: {"alicloud"}, }, }, "only database plugins": { - input: ListPluginsInput{Type: PluginTypeDatabase}, - expectedPlugins: map[PluginType][]string{ - PluginTypeDatabase: {"cassandra-database-plugin"}, + input: ListPluginsInput{Type: consts.PluginTypeDatabase}, + expectedPlugins: map[consts.PluginType][]string{ + consts.PluginTypeDatabase: {"cassandra-database-plugin"}, }, }, "only secret plugins": { - input: ListPluginsInput{Type: PluginTypeSecrets}, - expectedPlugins: map[PluginType][]string{ - PluginTypeSecrets: {"ad", "alicloud"}, + input: ListPluginsInput{Type: consts.PluginTypeSecrets}, + expectedPlugins: map[consts.PluginType][]string{ + consts.PluginTypeSecrets: {"ad", "alicloud"}, }, }, } { @@ -106,7 +104,7 @@ func TestListPlugins(t 
*testing.T) { } for _, actual := range resp.Details { - pluginType, err := ParsePluginType(actual.Type) + pluginType, err := consts.ParsePluginType(actual.Type) if err != nil { t.Fatal(err) } @@ -175,7 +173,7 @@ func TestGetPlugin(t *testing.T) { input := GetPluginInput{ Name: "azure", - Type: PluginTypeSecrets, + Type: consts.PluginTypeSecrets, } if tc.version != "" { input.Version = tc.version diff --git a/api/sys_policy.go b/api/sys_policy.go index 9ddffe4ec7c4a..4a4f91b08c71e 100644 --- a/api/sys_policy.go +++ b/api/sys_policy.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_raft.go b/api/sys_raft.go index 29bfed0f5613b..7806a1418df83 100644 --- a/api/sys_raft.go +++ b/api/sys_raft.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_rekey.go b/api/sys_rekey.go index 573201751c7b9..2ac8a4743bcfd 100644 --- a/api/sys_rekey.go +++ b/api/sys_rekey.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_rotate.go b/api/sys_rotate.go index 295d989f9e2a1..fa86886c35b8e 100644 --- a/api/sys_rotate.go +++ b/api/sys_rotate.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/sys_seal.go b/api/sys_seal.go index 7a9c5621ed194..c772ae0fc2600 100644 --- a/api/sys_seal.go +++ b/api/sys_seal.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( @@ -96,23 +93,22 @@ func sealStatusRequestWithContext(ctx context.Context, c *Sys, r *Request) (*Sea } type SealStatusResponse struct { - Type string `json:"type"` - Initialized bool `json:"initialized"` - Sealed bool `json:"sealed"` - T int `json:"t"` - N int `json:"n"` - Progress int `json:"progress"` - Nonce string `json:"nonce"` - Version string `json:"version"` - BuildDate string `json:"build_date"` - Migration bool `json:"migration"` - ClusterName string `json:"cluster_name,omitempty"` - ClusterID string `json:"cluster_id,omitempty"` - RecoverySeal bool `json:"recovery_seal"` - StorageType string `json:"storage_type,omitempty"` - HCPLinkStatus string `json:"hcp_link_status,omitempty"` - HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` - Warnings []string `json:"warnings,omitempty"` + Type string `json:"type"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Nonce string `json:"nonce"` + Version string `json:"version"` + BuildDate string `json:"build_date"` + Migration bool `json:"migration"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + RecoverySeal bool `json:"recovery_seal"` + StorageType string `json:"storage_type,omitempty"` + HCPLinkStatus string `json:"hcp_link_status,omitempty"` + HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` } type UnsealOpts struct { diff --git a/api/sys_stepdown.go b/api/sys_stepdown.go index c55ed1e666db9..833f31a6f7602 100644 --- a/api/sys_stepdown.go +++ b/api/sys_stepdown.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package api import ( diff --git a/api/test-fixtures/agent_config.hcl b/api/test-fixtures/agent_config.hcl index 38d8026057f52..8339f53d7eaad 100644 --- a/api/test-fixtures/agent_config.hcl +++ b/api/test-fixtures/agent_config.hcl @@ -1,5 +1,2 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - vault_addr="http://127.0.0.1:8200" ssh_mount_point="ssh" diff --git a/audit/audit.go b/audit/audit.go index 35a3d38a0558d..5641b449af309 100644 --- a/audit/audit.go +++ b/audit/audit.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package audit import ( diff --git a/audit/format.go b/audit/format.go index d595f2fd17401..6cafc1b4f3aae 100644 --- a/audit/format.go +++ b/audit/format.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package audit import ( @@ -11,7 +8,7 @@ import ( "strings" "time" - "github.com/go-jose/go-jose/v3/jwt" + squarejwt "gopkg.in/square/go-jose.v2/jwt" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/salt" @@ -92,9 +89,9 @@ func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config reqType = "request" } reqEntry := &AuditRequestEntry{ - Type: reqType, - Error: errString, - ForwardedFrom: req.ForwardedFrom, + Type: reqType, + Error: errString, + Auth: &AuditAuth{ ClientToken: auth.ClientToken, Accessor: auth.Accessor, @@ -112,18 +109,14 @@ func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config }, Request: &AuditRequest{ - ID: req.ID, - ClientID: req.ClientID, - ClientToken: req.ClientToken, - ClientTokenAccessor: req.ClientTokenAccessor, - Operation: req.Operation, - MountPoint: req.MountPoint, - MountType: req.MountType, - MountAccessor: req.MountAccessor, - MountRunningVersion: req.MountRunningVersion(), - MountRunningSha256: req.MountRunningSha256(), - MountIsExternalPlugin: req.MountIsExternalPlugin(), - MountClass: 
req.MountClass(), + ID: req.ID, + ClientID: req.ClientID, + ClientToken: req.ClientToken, + ClientTokenAccessor: req.ClientTokenAccessor, + Operation: req.Operation, + MountPoint: req.MountPoint, + MountType: req.MountType, + MountAccessor: req.MountAccessor, Namespace: &AuditNamespace{ ID: ns.ID, Path: ns.Path, @@ -201,24 +194,7 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config connState = in.Request.Connection.ConnState } - elideListResponseData := config.ElideListResponses && req.Operation == logical.ListOperation - - var respData map[string]interface{} - if config.Raw { - // In the non-raw case, elision of list response data occurs inside HashResponse, to avoid redundant deep - // copies and hashing of data only to elide it later. In the raw case, we need to do it here. - if elideListResponseData && resp.Data != nil { - // Copy the data map before making changes, but we only need to go one level deep in this case - respData = make(map[string]interface{}, len(resp.Data)) - for k, v := range resp.Data { - respData[k] = v - } - - doElideListResponseData(respData) - } else { - respData = resp.Data - } - } else { + if !config.Raw { auth, err = HashAuth(salt, auth, config.HMACAccessor) if err != nil { return err @@ -229,12 +205,10 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config return err } - resp, err = HashResponse(salt, resp, config.HMACAccessor, in.NonHMACRespDataKeys, elideListResponseData) + resp, err = HashResponse(salt, resp, config.HMACAccessor, in.NonHMACRespDataKeys) if err != nil { return err } - - respData = resp.Data } var errString string @@ -297,9 +271,8 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config respType = "response" } respEntry := &AuditResponseEntry{ - Type: respType, - Error: errString, - Forwarded: req.ForwardedFrom != "", + Type: respType, + Error: errString, Auth: &AuditAuth{ ClientToken: auth.ClientToken, Accessor: auth.Accessor, @@ 
-318,18 +291,14 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config }, Request: &AuditRequest{ - ID: req.ID, - ClientToken: req.ClientToken, - ClientTokenAccessor: req.ClientTokenAccessor, - ClientID: req.ClientID, - Operation: req.Operation, - MountPoint: req.MountPoint, - MountType: req.MountType, - MountAccessor: req.MountAccessor, - MountRunningVersion: req.MountRunningVersion(), - MountRunningSha256: req.MountRunningSha256(), - MountIsExternalPlugin: req.MountIsExternalPlugin(), - MountClass: req.MountClass(), + ID: req.ID, + ClientToken: req.ClientToken, + ClientTokenAccessor: req.ClientTokenAccessor, + ClientID: req.ClientID, + Operation: req.Operation, + MountPoint: req.MountPoint, + MountType: req.MountType, + MountAccessor: req.MountAccessor, Namespace: &AuditNamespace{ ID: ns.ID, Path: ns.Path, @@ -345,20 +314,16 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config }, Response: &AuditResponse{ - MountPoint: req.MountPoint, - MountType: req.MountType, - MountAccessor: req.MountAccessor, - MountRunningVersion: req.MountRunningVersion(), - MountRunningSha256: req.MountRunningSha256(), - MountIsExternalPlugin: req.MountIsExternalPlugin(), - MountClass: req.MountClass(), - Auth: respAuth, - Secret: respSecret, - Data: respData, - Warnings: resp.Warnings, - Redirect: resp.Redirect, - WrapInfo: respWrapInfo, - Headers: resp.Headers, + MountPoint: req.MountPoint, + MountType: req.MountType, + MountAccessor: req.MountAccessor, + Auth: respAuth, + Secret: respSecret, + Data: resp.Data, + Warnings: resp.Warnings, + Redirect: resp.Redirect, + WrapInfo: respWrapInfo, + Headers: resp.Headers, }, } @@ -393,23 +358,21 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config // AuditRequestEntry is the structure of a request audit log entry in Audit. 
type AuditRequestEntry struct { - Time string `json:"time,omitempty"` - Type string `json:"type,omitempty"` - Auth *AuditAuth `json:"auth,omitempty"` - Request *AuditRequest `json:"request,omitempty"` - Error string `json:"error,omitempty"` - ForwardedFrom string `json:"forwarded_from,omitempty"` // Populated in Enterprise when a request is forwarded + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` + Auth *AuditAuth `json:"auth,omitempty"` + Request *AuditRequest `json:"request,omitempty"` + Error string `json:"error,omitempty"` } // AuditResponseEntry is the structure of a response audit log entry in Audit. type AuditResponseEntry struct { - Time string `json:"time,omitempty"` - Type string `json:"type,omitempty"` - Auth *AuditAuth `json:"auth,omitempty"` - Request *AuditRequest `json:"request,omitempty"` - Response *AuditResponse `json:"response,omitempty"` - Error string `json:"error,omitempty"` - Forwarded bool `json:"forwarded,omitempty"` + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` + Auth *AuditAuth `json:"auth,omitempty"` + Request *AuditRequest `json:"request,omitempty"` + Response *AuditResponse `json:"response,omitempty"` + Error string `json:"error,omitempty"` } type AuditRequest struct { @@ -420,10 +383,6 @@ type AuditRequest struct { MountPoint string `json:"mount_point,omitempty"` MountType string `json:"mount_type,omitempty"` MountAccessor string `json:"mount_accessor,omitempty"` - MountRunningVersion string `json:"mount_running_version,omitempty"` - MountRunningSha256 string `json:"mount_running_sha256,omitempty"` - MountClass string `json:"mount_class,omitempty"` - MountIsExternalPlugin bool `json:"mount_is_external_plugin,omitempty"` ClientToken string `json:"client_token,omitempty"` ClientTokenAccessor string `json:"client_token_accessor,omitempty"` Namespace *AuditNamespace `json:"namespace,omitempty"` @@ -438,20 +397,16 @@ type AuditRequest struct { } type AuditResponse struct { - Auth 
*AuditAuth `json:"auth,omitempty"` - MountPoint string `json:"mount_point,omitempty"` - MountType string `json:"mount_type,omitempty"` - MountAccessor string `json:"mount_accessor,omitempty"` - MountRunningVersion string `json:"mount_running_plugin_version,omitempty"` - MountRunningSha256 string `json:"mount_running_sha256,omitempty"` - MountClass string `json:"mount_class,omitempty"` - MountIsExternalPlugin bool `json:"mount_is_external_plugin,omitempty"` - Secret *AuditSecret `json:"secret,omitempty"` - Data map[string]interface{} `json:"data,omitempty"` - Warnings []string `json:"warnings,omitempty"` - Redirect string `json:"redirect,omitempty"` - WrapInfo *AuditResponseWrapInfo `json:"wrap_info,omitempty"` - Headers map[string][]string `json:"headers,omitempty"` + Auth *AuditAuth `json:"auth,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + MountType string `json:"mount_type,omitempty"` + MountAccessor string `json:"mount_accessor,omitempty"` + Secret *AuditSecret `json:"secret,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + Warnings []string `json:"warnings,omitempty"` + Redirect string `json:"redirect,omitempty"` + WrapInfo *AuditResponseWrapInfo `json:"wrap_info,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` } type AuditAuth struct { @@ -535,12 +490,12 @@ func parseVaultTokenFromJWT(token string) *string { return nil } - parsedJWT, err := jwt.ParseSigned(token) + parsedJWT, err := squarejwt.ParseSigned(token) if err != nil { return nil } - var claims jwt.Claims + var claims squarejwt.Claims if err = parsedJWT.UnsafeClaimsWithoutVerification(&claims); err != nil { return nil } @@ -548,7 +503,7 @@ func parseVaultTokenFromJWT(token string) *string { return &claims.ID } -// NewTemporaryFormatter creates a formatter not backed by a persistent salt +// Create a formatter not backed by a persistent salt. 
func NewTemporaryFormatter(format, prefix string) *AuditFormatter { temporarySalt := func(ctx context.Context) (*salt.Salt, error) { return salt.NewNonpersistentSalt(), nil @@ -569,22 +524,3 @@ func NewTemporaryFormatter(format, prefix string) *AuditFormatter { } return ret } - -// doElideListResponseData performs the actual elision of list operation response data, once surrounding code has -// determined it should apply to a particular request. The data map that is passed in must be a copy that is safe to -// modify in place, but need not be a full recursive deep copy, as only top-level keys are changed. -// -// See the documentation of the controlling option in FormatterConfig for more information on the purpose. -func doElideListResponseData(data map[string]interface{}) { - for k, v := range data { - if k == "keys" { - if vSlice, ok := v.([]string); ok { - data[k] = len(vSlice) - } - } else if k == "key_info" { - if vMap, ok := v.(map[string]interface{}); ok { - data[k] = len(vMap) - } - } - } -} diff --git a/audit/format_json.go b/audit/format_json.go index 74f4138184a46..4003c05a72178 100644 --- a/audit/format_json.go +++ b/audit/format_json.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package audit import ( diff --git a/audit/format_json_test.go b/audit/format_json_test.go index fa31cde83bea6..e4a703d12ad42 100644 --- a/audit/format_json_test.go +++ b/audit/format_json_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package audit import ( diff --git a/audit/format_jsonx.go b/audit/format_jsonx.go index 20352a2deadeb..bff244099a9ab 100644 --- a/audit/format_jsonx.go +++ b/audit/format_jsonx.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package audit import ( diff --git a/audit/format_jsonx_test.go b/audit/format_jsonx_test.go index fb6046195187e..00921c0c71a95 100644 --- a/audit/format_jsonx_test.go +++ b/audit/format_jsonx_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package audit import ( diff --git a/audit/format_test.go b/audit/format_test.go index 5395d916cc816..4f3cc5cbb2e57 100644 --- a/audit/format_test.go +++ b/audit/format_test.go @@ -1,79 +1,47 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package audit import ( "context" "io" + "io/ioutil" "testing" - "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/copystructure" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -type testingFormatWriter struct { - salt *salt.Salt - lastRequest *AuditRequestEntry - lastResponse *AuditResponseEntry +type noopFormatWriter struct { + salt *salt.Salt + SaltFunc func() (*salt.Salt, error) } -func (fw *testingFormatWriter) WriteRequest(_ io.Writer, entry *AuditRequestEntry) error { - fw.lastRequest = entry +func (n *noopFormatWriter) WriteRequest(_ io.Writer, _ *AuditRequestEntry) error { return nil } -func (fw *testingFormatWriter) WriteResponse(_ io.Writer, entry *AuditResponseEntry) error { - fw.lastResponse = entry +func (n *noopFormatWriter) WriteResponse(_ io.Writer, _ *AuditResponseEntry) error { return nil } -func (fw *testingFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { - if fw.salt != nil { - return fw.salt, nil +func (n *noopFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { + if n.salt != nil { + return n.salt, nil } var err error - fw.salt, err = salt.NewSalt(ctx, nil, nil) + n.salt, err = salt.NewSalt(ctx, nil, nil) if err != nil { return nil, err } - return fw.salt, nil -} - -// hashExpectedValueForComparison 
replicates enough of the audit HMAC process on a piece of expected data in a test, -// so that we can use assert.Equal to compare the expected and output values. -func (fw *testingFormatWriter) hashExpectedValueForComparison(input map[string]interface{}) map[string]interface{} { - // Copy input before modifying, since we may re-use the same data in another test - copied, err := copystructure.Copy(input) - if err != nil { - panic(err) - } - copiedAsMap := copied.(map[string]interface{}) - - salter, err := fw.Salt(context.Background()) - if err != nil { - panic(err) - } - - err = hashMap(salter.GetIdentifiedHMAC, copiedAsMap, nil) - if err != nil { - panic(err) - } - - return copiedAsMap + return n.salt, nil } func TestFormatRequestErrors(t *testing.T) { config := FormatterConfig{} formatter := AuditFormatter{ - AuditFormatWriter: &testingFormatWriter{}, + AuditFormatWriter: &noopFormatWriter{}, } - if err := formatter.FormatRequest(context.Background(), io.Discard, config, &logical.LogInput{}); err == nil { + if err := formatter.FormatRequest(context.Background(), ioutil.Discard, config, &logical.LogInput{}); err == nil { t.Fatal("expected error due to nil request") } @@ -88,10 +56,10 @@ func TestFormatRequestErrors(t *testing.T) { func TestFormatResponseErrors(t *testing.T) { config := FormatterConfig{} formatter := AuditFormatter{ - AuditFormatWriter: &testingFormatWriter{}, + AuditFormatWriter: &noopFormatWriter{}, } - if err := formatter.FormatResponse(context.Background(), io.Discard, config, &logical.LogInput{}); err == nil { + if err := formatter.FormatResponse(context.Background(), ioutil.Discard, config, &logical.LogInput{}); err == nil { t.Fatal("expected error due to nil request") } @@ -102,123 +70,3 @@ func TestFormatResponseErrors(t *testing.T) { t.Fatal("expected error due to nil writer") } } - -func TestElideListResponses(t *testing.T) { - tfw := testingFormatWriter{} - formatter := AuditFormatter{&tfw} - ctx := 
namespace.RootContext(context.Background()) - - type test struct { - name string - inputData map[string]interface{} - expectedData map[string]interface{} - } - - tests := []test{ - { - "nil data", - nil, - nil, - }, - { - "Normal list (keys only)", - map[string]interface{}{ - "keys": []string{"foo", "bar", "baz"}, - }, - map[string]interface{}{ - "keys": 3, - }, - }, - { - "Enhanced list (has key_info)", - map[string]interface{}{ - "keys": []string{"foo", "bar", "baz", "quux"}, - "key_info": map[string]interface{}{ - "foo": "alpha", - "bar": "beta", - "baz": "gamma", - "quux": "delta", - }, - }, - map[string]interface{}{ - "keys": 4, - "key_info": 4, - }, - }, - { - "Unconventional other values in a list response are not touched", - map[string]interface{}{ - "keys": []string{"foo", "bar"}, - "something_else": "baz", - }, - map[string]interface{}{ - "keys": 2, - "something_else": "baz", - }, - }, - { - "Conventional values in a list response are not elided if their data types are unconventional", - map[string]interface{}{ - "keys": map[string]interface{}{ - "You wouldn't expect keys to be a map": nil, - }, - "key_info": []string{ - "You wouldn't expect key_info to be a slice", - }, - }, - map[string]interface{}{ - "keys": map[string]interface{}{ - "You wouldn't expect keys to be a map": nil, - }, - "key_info": []string{ - "You wouldn't expect key_info to be a slice", - }, - }, - }, - } - oneInterestingTestCase := tests[2] - - formatResponse := func( - t *testing.T, - config FormatterConfig, - operation logical.Operation, - inputData map[string]interface{}, - ) { - err := formatter.FormatResponse(ctx, io.Discard, config, &logical.LogInput{ - Request: &logical.Request{Operation: operation}, - Response: &logical.Response{Data: inputData}, - }) - require.Nil(t, err) - } - - t.Run("Default case", func(t *testing.T) { - config := FormatterConfig{ElideListResponses: true} - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - formatResponse(t, config, 
logical.ListOperation, tc.inputData) - assert.Equal(t, tfw.hashExpectedValueForComparison(tc.expectedData), tfw.lastResponse.Response.Data) - }) - } - }) - - t.Run("When Operation is not list, eliding does not happen", func(t *testing.T) { - config := FormatterConfig{ElideListResponses: true} - tc := oneInterestingTestCase - formatResponse(t, config, logical.ReadOperation, tc.inputData) - assert.Equal(t, tfw.hashExpectedValueForComparison(tc.inputData), tfw.lastResponse.Response.Data) - }) - - t.Run("When ElideListResponses is false, eliding does not happen", func(t *testing.T) { - config := FormatterConfig{ElideListResponses: false} - tc := oneInterestingTestCase - formatResponse(t, config, logical.ListOperation, tc.inputData) - assert.Equal(t, tfw.hashExpectedValueForComparison(tc.inputData), tfw.lastResponse.Response.Data) - }) - - t.Run("When Raw is true, eliding still happens", func(t *testing.T) { - config := FormatterConfig{ElideListResponses: true, Raw: true} - tc := oneInterestingTestCase - formatResponse(t, config, logical.ListOperation, tc.inputData) - assert.Equal(t, tc.expectedData, tfw.lastResponse.Response.Data) - }) -} diff --git a/audit/formatter.go b/audit/formatter.go index 98c393c3b8170..c27035768d359 100644 --- a/audit/formatter.go +++ b/audit/formatter.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package audit import ( @@ -10,7 +7,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -// Formatter is an interface that is responsible for formatting a +// Formatter is an interface that is responsible for formating a // request/response into some format. Formatters write their output // to an io.Writer. // @@ -24,28 +21,6 @@ type FormatterConfig struct { Raw bool HMACAccessor bool - // Vault lacks pagination in its APIs. As a result, certain list operations can return **very** large responses. 
- // The user's chosen audit sinks may experience difficulty consuming audit records that swell to tens of megabytes - // of JSON. The responses of list operations are typically not very interesting, as they are mostly lists of keys, - // or, even when they include a "key_info" field, are not returning confidential information. They become even less - // interesting once HMAC-ed by the audit system. - // - // Some example Vault "list" operations that are prone to becoming very large in an active Vault installation are: - // auth/token/accessors/ - // identity/entity/id/ - // identity/entity-alias/id/ - // pki/certs/ - // - // This option exists to provide such users with the option to have response data elided from audit logs, only when - // the operation type is "list". For added safety, the elision only applies to the "keys" and "key_info" fields - // within the response data - these are conventionally the only fields present in a list response - see - // logical.ListResponse, and logical.ListResponseWithInfo. However, other fields are technically possible if a - // plugin author writes unusual code, and these will be preserved in the audit log even with this option enabled. - // The elision replaces the values of the "keys" and "key_info" fields with an integer count of the number of - // entries. This allows even the elided audit logs to still be useful for answering questions like - // "Was any data returned?" or "How many records were listed?". - ElideListResponses bool - // This should only ever be used in a testing context OmitTime bool } diff --git a/audit/hashstructure.go b/audit/hashstructure.go index cd4f8085d13d3..11c6214ff7b34 100644 --- a/audit/hashstructure.go +++ b/audit/hashstructure.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package audit import ( @@ -101,13 +98,7 @@ func hashMap(fn func(string) string, data map[string]interface{}, nonHMACDataKey } // HashResponse returns a hashed copy of the logical.Request input. -func HashResponse( - salter *salt.Salt, - in *logical.Response, - HMACAccessor bool, - nonHMACDataKeys []string, - elideListResponseData bool, -) (*logical.Response, error) { +func HashResponse(salter *salt.Salt, in *logical.Response, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Response, error) { if in == nil { return nil, nil } @@ -138,20 +129,12 @@ func HashResponse( mapCopy[logical.HTTPRawBody] = string(b) } - // Processing list response data elision takes place at this point in the code for performance reasons: - // - take advantage of the deep copy of resp.Data that was going to be done anyway for hashing - // - but elide data before potentially spending time hashing it - if elideListResponseData { - doElideListResponseData(mapCopy) - } - err = hashMap(fn, mapCopy, nonHMACDataKeys) if err != nil { return nil, err } resp.Data = mapCopy } - if resp.WrapInfo != nil { var err error resp.WrapInfo, err = HashWrapInfo(salter, resp.WrapInfo, HMACAccessor) diff --git a/audit/hashstructure_test.go b/audit/hashstructure_test.go index c65931f7c5be2..7f6c5a8694857 100644 --- a/audit/hashstructure_test.go +++ b/audit/hashstructure_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package audit import ( @@ -296,7 +293,7 @@ func TestHashResponse(t *testing.T) { } for _, tc := range cases { input := fmt.Sprintf("%#v", tc.Input) - out, err := HashResponse(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys, false) + out, err := HashResponse(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys) if err != nil { t.Fatalf("err: %s\n\n%s", err, input) } diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go index 2c3ef3f8e0774..9e7d7c36c7108 100644 --- a/builtin/audit/file/backend.go +++ b/builtin/audit/file/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package file import ( @@ -74,15 +71,6 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err logRaw = b } - elideListResponses := false - if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok { - value, err := strconv.ParseBool(elideListResponsesRaw) - if err != nil { - return nil, err - } - elideListResponses = value - } - // Check if mode is provided mode := os.FileMode(0o600) if modeRaw, ok := conf.Config["mode"]; ok { @@ -114,9 +102,8 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err saltView: conf.SaltView, salt: new(atomic.Value), formatConfig: audit.FormatterConfig{ - Raw: logRaw, - HMACAccessor: hmacAccessor, - ElideListResponses: elideListResponses, + Raw: logRaw, + HMACAccessor: hmacAccessor, }, } diff --git a/builtin/audit/file/backend_test.go b/builtin/audit/file/backend_test.go index ad082ace5d432..817518c50bd85 100644 --- a/builtin/audit/file/backend_test.go +++ b/builtin/audit/file/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package file import ( diff --git a/builtin/audit/socket/backend.go b/builtin/audit/socket/backend.go index 4c649e0e9c748..7a000b2c7c667 100644 --- a/builtin/audit/socket/backend.go +++ b/builtin/audit/socket/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package socket import ( @@ -12,7 +9,7 @@ import ( "sync" "time" - "github.com/hashicorp/go-multierror" + multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/sdk/helper/salt" @@ -76,22 +73,12 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err logRaw = b } - elideListResponses := false - if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok { - value, err := strconv.ParseBool(elideListResponsesRaw) - if err != nil { - return nil, err - } - elideListResponses = value - } - b := &Backend{ saltConfig: conf.SaltConfig, saltView: conf.SaltView, formatConfig: audit.FormatterConfig{ - Raw: logRaw, - HMACAccessor: hmacAccessor, - ElideListResponses: elideListResponses, + Raw: logRaw, + HMACAccessor: hmacAccessor, }, writeDuration: writeDuration, diff --git a/builtin/audit/syslog/backend.go b/builtin/audit/syslog/backend.go index 2da92fe2e40e5..9c7b775b88c80 100644 --- a/builtin/audit/syslog/backend.go +++ b/builtin/audit/syslog/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package syslog import ( @@ -66,15 +63,6 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err logRaw = b } - elideListResponses := false - if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok { - value, err := strconv.ParseBool(elideListResponsesRaw) - if err != nil { - return nil, err - } - elideListResponses = value - } - // Get the logger logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag) if err != nil { @@ -86,9 +74,8 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err saltConfig: conf.SaltConfig, saltView: conf.SaltView, formatConfig: audit.FormatterConfig{ - Raw: logRaw, - HMACAccessor: hmacAccessor, - ElideListResponses: elideListResponses, + Raw: logRaw, + HMACAccessor: hmacAccessor, }, } diff --git a/builtin/credential/app-id/backend.go b/builtin/credential/app-id/backend.go new file mode 100644 index 0000000000000..b77221d75ba61 --- /dev/null +++ b/builtin/credential/app-id/backend.go @@ -0,0 +1,184 @@ +package appId + +import ( + "context" + "sync" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b, err := Backend(conf) + if err != nil { + return nil, err + } + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend(conf *logical.BackendConfig) (*backend, error) { + var b backend + b.MapAppId = &framework.PolicyMap{ + PathMap: framework.PathMap{ + Name: "app-id", + Schema: map[string]*framework.FieldSchema{ + "display_name": { + Type: framework.TypeString, + Description: "A name to map to this app ID for logs.", + }, + + "value": { + Type: framework.TypeString, + Description: "Policies for the app ID.", + }, + }, + }, + DefaultKey: "default", + } + + b.MapUserId = &framework.PathMap{ + Name: 
"user-id", + Schema: map[string]*framework.FieldSchema{ + "cidr_block": { + Type: framework.TypeString, + Description: "If not blank, restricts auth by this CIDR block", + }, + + "value": { + Type: framework.TypeString, + Description: "App IDs that this user associates with.", + }, + }, + } + + b.Backend = &framework.Backend{ + Help: backendHelp, + + PathsSpecial: &logical.Paths{ + Unauthenticated: []string{ + "login", + "login/*", + }, + }, + Paths: framework.PathAppend([]*framework.Path{ + pathLogin(&b), + pathLoginWithAppIDPath(&b), + }, + b.MapAppId.Paths(), + b.MapUserId.Paths(), + ), + AuthRenew: b.pathLoginRenew, + Invalidate: b.invalidate, + BackendType: logical.TypeCredential, + } + + b.view = conf.StorageView + b.MapAppId.SaltFunc = b.Salt + b.MapUserId.SaltFunc = b.Salt + + return &b, nil +} + +type backend struct { + *framework.Backend + + salt *salt.Salt + SaltMutex sync.RWMutex + view logical.Storage + MapAppId *framework.PolicyMap + MapUserId *framework.PathMap +} + +func (b *backend) Salt(ctx context.Context) (*salt.Salt, error) { + b.SaltMutex.RLock() + if b.salt != nil { + defer b.SaltMutex.RUnlock() + return b.salt, nil + } + b.SaltMutex.RUnlock() + b.SaltMutex.Lock() + defer b.SaltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } + salt, err := salt.NewSalt(ctx, b.view, &salt.Config{ + HashFunc: salt.SHA1Hash, + Location: salt.DefaultLocation, + }) + if err != nil { + return nil, err + } + b.salt = salt + return salt, nil +} + +func (b *backend) invalidate(_ context.Context, key string) { + switch key { + case salt.DefaultLocation: + b.SaltMutex.Lock() + defer b.SaltMutex.Unlock() + b.salt = nil + } +} + +const backendHelp = ` +The App ID credential provider is used to perform authentication from +within applications or machine by pairing together two hard-to-guess +unique pieces of information: a unique app ID, and a unique user ID. 
+ +The goal of this credential provider is to allow elastic users +(dynamic machines, containers, etc.) to authenticate with Vault without +having to store passwords outside of Vault. It is a single method of +solving the chicken-and-egg problem of setting up Vault access on a machine. +With this provider, nobody except the machine itself has access to both +pieces of information necessary to authenticate. For example: +configuration management will have the app IDs, but the machine itself +will detect its user ID based on some unique machine property such as a +MAC address (or a hash of it with some salt). + +An example, real world process for using this provider: + + 1. Create unique app IDs (UUIDs work well) and map them to policies. + (Path: map/app-id/) + + 2. Store the app IDs within configuration management systems. + + 3. An out-of-band process run by security operators map unique user IDs + to these app IDs. Example: when an instance is launched, a cloud-init + system tells security operators a unique ID for this machine. This + process can be scripted, but the key is that it is out-of-band and + out of reach of configuration management. + (Path: map/user-id/) + + 4. A new server is provisioned. Configuration management configures the + app ID, the server itself detects its user ID. With both of these + pieces of information, Vault can be accessed according to the policy + set by the app ID. + +More details on this process follow: + +The app ID is a unique ID that maps to a set of policies. This ID is +generated by an operator and configured into the backend. The ID itself +is usually a UUID, but any hard-to-guess unique value can be used. + +After creating app IDs, an operator authorizes a fixed set of user IDs +with each app ID. When a valid {app ID, user ID} tuple is given to the +"login" path, then the user is authenticated with the configured app +ID policies. 
+ +The user ID can be any value (just like the app ID), however it is +generally a value unique to a machine, such as a MAC address or instance ID, +or a value hashed from these unique values. + +It is possible to authorize multiple app IDs with each +user ID by writing them as comma-separated values to the map/user-id/ +path. + +It is also possible to renew the auth tokens with 'vault token-renew ' command. +Before the token is renewed, the validity of app ID, user ID and the associated +policies are checked again. +` diff --git a/builtin/credential/app-id/backend_test.go b/builtin/credential/app-id/backend_test.go new file mode 100644 index 0000000000000..a8b7077ef002c --- /dev/null +++ b/builtin/credential/app-id/backend_test.go @@ -0,0 +1,239 @@ +package appId + +import ( + "context" + "fmt" + "testing" + + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestBackend_basic(t *testing.T) { + var b *backend + var err error + var storage logical.Storage + factory := func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b, err = Backend(conf) + if err != nil { + t.Fatal(err) + } + storage = conf.StorageView + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialFactory: factory, + Steps: []logicaltest.TestStep{ + testAccStepMapAppId(t), + testAccStepMapUserId(t), + testAccLogin(t, ""), + testAccLoginAppIDInPath(t, ""), + testAccLoginInvalid(t), + testAccStepDeleteUserId(t), + testAccLoginDeleted(t), + }, + }) + + req := &logical.Request{ + Path: "map/app-id", + Operation: logical.ListOperation, + Storage: storage, + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("nil response") + } + keys := resp.Data["keys"].([]string) + if len(keys) != 1 { + 
t.Fatalf("expected 1 key, got %d", len(keys)) + } + bSalt, err := b.Salt(context.Background()) + if err != nil { + t.Fatal(err) + } + if keys[0] != "s"+bSalt.SaltIDHashFunc("foo", salt.SHA256Hash) { + t.Fatal("value was improperly salted") + } +} + +func TestBackend_cidr(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialFactory: Factory, + Steps: []logicaltest.TestStep{ + testAccStepMapAppIdDisplayName(t), + testAccStepMapUserIdCidr(t, "192.168.1.0/16"), + testAccLoginCidr(t, "192.168.1.5", false), + testAccLoginCidr(t, "10.0.1.5", true), + testAccLoginCidr(t, "", true), + }, + }) +} + +func TestBackend_displayName(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialFactory: Factory, + Steps: []logicaltest.TestStep{ + testAccStepMapAppIdDisplayName(t), + testAccStepMapUserId(t), + testAccLogin(t, "tubbin"), + testAccLoginAppIDInPath(t, "tubbin"), + testAccLoginInvalid(t), + testAccStepDeleteUserId(t), + testAccLoginDeleted(t), + }, + }) +} + +func testAccStepMapAppId(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "map/app-id/foo", + Data: map[string]interface{}{ + "value": "foo,bar", + }, + } +} + +func testAccStepMapAppIdDisplayName(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "map/app-id/foo", + Data: map[string]interface{}{ + "display_name": "tubbin", + "value": "foo,bar", + }, + } +} + +func testAccStepMapUserId(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "map/user-id/42", + Data: map[string]interface{}{ + "value": "foo", + }, + } +} + +func testAccStepDeleteUserId(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "map/user-id/42", + } +} + +func testAccStepMapUserIdCidr(t *testing.T, cidr string) logicaltest.TestStep { + return logicaltest.TestStep{ + 
Operation: logical.UpdateOperation, + Path: "map/user-id/42", + Data: map[string]interface{}{ + "value": "foo", + "cidr_block": cidr, + }, + } +} + +func testAccLogin(t *testing.T, display string) logicaltest.TestStep { + checkTTL := func(resp *logical.Response) error { + if resp.Auth.LeaseOptions.TTL.String() != "768h0m0s" { + return fmt.Errorf("invalid TTL: got %s", resp.Auth.LeaseOptions.TTL) + } + return nil + } + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login", + Data: map[string]interface{}{ + "app_id": "foo", + "user_id": "42", + }, + Unauthenticated: true, + + Check: logicaltest.TestCheckMulti( + logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}), + logicaltest.TestCheckAuthDisplayName(display), + checkTTL, + ), + } +} + +func testAccLoginAppIDInPath(t *testing.T, display string) logicaltest.TestStep { + checkTTL := func(resp *logical.Response) error { + if resp.Auth.LeaseOptions.TTL.String() != "768h0m0s" { + return fmt.Errorf("invalid TTL: got %s", resp.Auth.LeaseOptions.TTL) + } + return nil + } + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/foo", + Data: map[string]interface{}{ + "user_id": "42", + }, + Unauthenticated: true, + + Check: logicaltest.TestCheckMulti( + logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}), + logicaltest.TestCheckAuthDisplayName(display), + checkTTL, + ), + } +} + +func testAccLoginCidr(t *testing.T, ip string, err bool) logicaltest.TestStep { + check := logicaltest.TestCheckError() + if !err { + check = logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}) + } + + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login", + Data: map[string]interface{}{ + "app_id": "foo", + "user_id": "42", + }, + ErrorOk: err, + Unauthenticated: true, + RemoteAddr: ip, + + Check: check, + } +} + +func testAccLoginInvalid(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: 
logical.UpdateOperation, + Path: "login", + Data: map[string]interface{}{ + "app_id": "foo", + "user_id": "48", + }, + ErrorOk: true, + Unauthenticated: true, + + Check: logicaltest.TestCheckError(), + } +} + +func testAccLoginDeleted(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login", + Data: map[string]interface{}{ + "app_id": "foo", + "user_id": "42", + }, + ErrorOk: true, + Unauthenticated: true, + + Check: logicaltest.TestCheckError(), + } +} diff --git a/builtin/credential/app-id/cmd/app-id/main.go b/builtin/credential/app-id/cmd/app-id/main.go new file mode 100644 index 0000000000000..ce482d6302065 --- /dev/null +++ b/builtin/credential/app-id/cmd/app-id/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + appId "github.com/hashicorp/vault/builtin/credential/app-id" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.Serve(&plugin.ServeOpts{ + BackendFactoryFunc: appId.Factory, + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/credential/app-id/path_login.go b/builtin/credential/app-id/path_login.go new file mode 100644 index 0000000000000..1c1198a195496 --- /dev/null +++ b/builtin/credential/app-id/path_login.go @@ -0,0 +1,229 @@ +package appId + +import ( + "context" + "crypto/sha1" + "crypto/subtle" + "encoding/hex" + "fmt" + "net" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathLoginWithAppIDPath(b 
*backend) *framework.Path { + return &framework.Path{ + Pattern: "login/(?P.+)", + Fields: map[string]*framework.FieldSchema{ + "app_id": { + Type: framework.TypeString, + Description: "The unique app ID", + }, + + "user_id": { + Type: framework.TypeString, + Description: "The unique user ID", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathLogin, + }, + + HelpSynopsis: pathLoginSyn, + HelpDescription: pathLoginDesc, + } +} + +func pathLogin(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "login$", + Fields: map[string]*framework.FieldSchema{ + "app_id": { + Type: framework.TypeString, + Description: "The unique app ID", + }, + + "user_id": { + Type: framework.TypeString, + Description: "The unique user ID", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathLogin, + logical.AliasLookaheadOperation: b.pathLoginAliasLookahead, + }, + + HelpSynopsis: pathLoginSyn, + HelpDescription: pathLoginDesc, + } +} + +func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + appId := data.Get("app_id").(string) + + if appId == "" { + return nil, fmt.Errorf("missing app_id") + } + + return &logical.Response{ + Auth: &logical.Auth{ + Alias: &logical.Alias{ + Name: appId, + }, + }, + }, nil +} + +func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + appId := data.Get("app_id").(string) + userId := data.Get("user_id").(string) + + var displayName string + if dispName, resp, err := b.verifyCredentials(ctx, req, appId, userId); err != nil { + return nil, err + } else if resp != nil { + return resp, nil + } else { + displayName = dispName + } + + // Get the policies associated with the app + policies, err := b.MapAppId.Policies(ctx, req.Storage, appId) + if err != nil { + return nil, err + } + + // 
Store hashes of the app ID and user ID for the metadata + appIdHash := sha1.Sum([]byte(appId)) + userIdHash := sha1.Sum([]byte(userId)) + metadata := map[string]string{ + "app-id": "sha1:" + hex.EncodeToString(appIdHash[:]), + "user-id": "sha1:" + hex.EncodeToString(userIdHash[:]), + } + + return &logical.Response{ + Auth: &logical.Auth{ + InternalData: map[string]interface{}{ + "app-id": appId, + "user-id": userId, + }, + DisplayName: displayName, + Policies: policies, + Metadata: metadata, + LeaseOptions: logical.LeaseOptions{ + Renewable: true, + }, + Alias: &logical.Alias{ + Name: appId, + }, + }, + }, nil +} + +func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + appId := req.Auth.InternalData["app-id"].(string) + userId := req.Auth.InternalData["user-id"].(string) + + // Skipping CIDR verification to enable renewal from machines other than + // the ones encompassed by CIDR block. + if _, resp, err := b.verifyCredentials(ctx, req, appId, userId); err != nil { + return nil, err + } else if resp != nil { + return resp, nil + } + + // Get the policies associated with the app + mapPolicies, err := b.MapAppId.Policies(ctx, req.Storage, appId) + if err != nil { + return nil, err + } + if !policyutil.EquivalentPolicies(mapPolicies, req.Auth.TokenPolicies) { + return nil, fmt.Errorf("policies do not match") + } + + return &logical.Response{Auth: req.Auth}, nil +} + +func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, appId, userId string) (string, *logical.Response, error) { + // Ensure both appId and userId are provided + if appId == "" || userId == "" { + return "", logical.ErrorResponse("missing 'app_id' or 'user_id'"), nil + } + + // Look up the apps that this user is allowed to access + appsMap, err := b.MapUserId.Get(ctx, req.Storage, userId) + if err != nil { + return "", nil, err + } + if appsMap == nil { + return "", logical.ErrorResponse("invalid user ID 
or app ID"), nil + } + + // If there is a CIDR block restriction, check that + if raw, ok := appsMap["cidr_block"]; ok { + _, cidr, err := net.ParseCIDR(raw.(string)) + if err != nil { + return "", nil, fmt.Errorf("invalid restriction cidr: %w", err) + } + + var addr string + if req.Connection != nil { + addr = req.Connection.RemoteAddr + } + if addr == "" || !cidr.Contains(net.ParseIP(addr)) { + return "", logical.ErrorResponse("unauthorized source address"), nil + } + } + + appsRaw, ok := appsMap["value"] + if !ok { + appsRaw = "" + } + + apps, ok := appsRaw.(string) + if !ok { + return "", nil, fmt.Errorf("mapping is not a string") + } + + // Verify that the app is in the list + found := false + appIdBytes := []byte(appId) + for _, app := range strings.Split(apps, ",") { + match := []byte(strings.TrimSpace(app)) + // Protect against a timing attack with the app_id comparison + if subtle.ConstantTimeCompare(match, appIdBytes) == 1 { + found = true + } + } + if !found { + return "", logical.ErrorResponse("invalid user ID or app ID"), nil + } + + // Get the raw data associated with the app + appRaw, err := b.MapAppId.Get(ctx, req.Storage, appId) + if err != nil { + return "", nil, err + } + if appRaw == nil { + return "", logical.ErrorResponse("invalid user ID or app ID"), nil + } + var displayName string + if raw, ok := appRaw["display_name"]; ok { + displayName = raw.(string) + } + + return displayName, nil, nil +} + +const pathLoginSyn = ` +Log in with an App ID and User ID. +` + +const pathLoginDesc = ` +This endpoint authenticates using an application ID, user ID and potential the IP address of the connecting client. +` diff --git a/builtin/credential/approle/backend.go b/builtin/credential/approle/backend.go index 4165fbbec47ef..ebd8d3c06a801 100644 --- a/builtin/credential/approle/backend.go +++ b/builtin/credential/approle/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package approle import ( @@ -15,7 +12,6 @@ import ( ) const ( - operationPrefixAppRole = "app-role" secretIDPrefix = "secret_id/" secretIDLocalPrefix = "secret_id_local/" secretIDAccessorPrefix = "accessor/" diff --git a/builtin/credential/approle/backend_test.go b/builtin/credential/approle/backend_test.go index 683249e316dc6..044f02d2a2a4c 100644 --- a/builtin/credential/approle/backend_test.go +++ b/builtin/credential/approle/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package approle import ( @@ -177,7 +174,7 @@ func TestAppRole_RoleNameCaseSensitivity(t *testing.T) { }, Storage: s, }) - if err != nil && err != logical.ErrInvalidCredentials { + if err != nil { t.Fatal(err) } if resp == nil || !resp.IsError() { @@ -236,7 +233,7 @@ func TestAppRole_RoleNameCaseSensitivity(t *testing.T) { }, Storage: s, }) - if err != nil && err != logical.ErrInvalidCredentials { + if err != nil { t.Fatal(err) } if resp == nil || !resp.IsError() { @@ -295,7 +292,7 @@ func TestAppRole_RoleNameCaseSensitivity(t *testing.T) { }, Storage: s, }) - if err != nil && err != logical.ErrInvalidCredentials { + if err != nil { t.Fatal(err) } if resp == nil || !resp.IsError() { @@ -354,7 +351,7 @@ func TestAppRole_RoleNameCaseSensitivity(t *testing.T) { }, Storage: s, }) - if err != nil && err != logical.ErrInvalidCredentials { + if err != nil { t.Fatal(err) } if resp == nil || !resp.IsError() { @@ -413,7 +410,7 @@ func TestAppRole_RoleNameCaseSensitivity(t *testing.T) { }, Storage: s, }) - if err != nil && err != logical.ErrInvalidCredentials { + if err != nil { t.Fatal(err) } if resp == nil || !resp.IsError() { diff --git a/builtin/credential/approle/cmd/approle/main.go b/builtin/credential/approle/cmd/approle/main.go index 9000ea95810a9..22fa242fa623d 100644 --- a/builtin/credential/approle/cmd/approle/main.go +++ b/builtin/credential/approle/cmd/approle/main.go @@ -1,6 +1,3 @@ -// Copyright 
(c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: approle.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/approle/path_login.go b/builtin/credential/approle/path_login.go index a4c4fe82cde9a..5822519b1c79f 100644 --- a/builtin/credential/approle/path_login.go +++ b/builtin/credential/approle/path_login.go @@ -1,12 +1,8 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package approle import ( "context" "fmt" - "net/http" "strings" "time" @@ -19,10 +15,6 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationVerb: "login", - }, Fields: map[string]*framework.FieldSchema{ "role_id": { Type: framework.TypeString, @@ -37,33 +29,12 @@ func pathLogin(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathLoginUpdate, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: http.StatusText(http.StatusOK), - }}, - }, }, logical.AliasLookaheadOperation: &framework.PathOperation{ Callback: b.pathLoginUpdateAliasLookahead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: http.StatusText(http.StatusOK), - }}, - }, }, logical.ResolveRoleOperation: &framework.PathOperation{ Callback: b.pathLoginResolveRole, - Responses: 
map[int][]framework.Response{ - http.StatusOK: {{ - Description: http.StatusText(http.StatusOK), - Fields: map[string]*framework.FieldSchema{ - "role": { - Type: framework.TypeString, - Required: true, - }, - }, - }}, - }, }, }, HelpSynopsis: pathLoginHelpSys, @@ -184,7 +155,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, err } if entry == nil { - return logical.ErrorResponse("invalid secret id"), logical.ErrInvalidCredentials + return logical.ErrorResponse("invalid secret id"), nil } // If a secret ID entry does not have a corresponding accessor diff --git a/builtin/credential/approle/path_login_test.go b/builtin/credential/approle/path_login_test.go index 5a09c6c4e3f42..542bf665b6e9a 100644 --- a/builtin/credential/approle/path_login_test.go +++ b/builtin/credential/approle/path_login_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package approle import ( @@ -19,7 +16,7 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { // Create a role with secret ID binding disabled and only bound cidr list // enabled - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole", Operation: logical.CreateOperation, Data: map[string]interface{}{ @@ -29,18 +26,24 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { }, Storage: s, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } // Read the role ID - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole/role-id", Operation: logical.ReadOperation, Storage: s, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleID := resp.Data["role_id"] // Fill in the connection information and login with just the role ID - resp = b.requestNoErr(t, &logical.Request{ + resp, err = 
b.HandleRequest(context.Background(), &logical.Request{ Path: "login", Operation: logical.UpdateOperation, Data: map[string]interface{}{ @@ -49,7 +52,9 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { Storage: s, Connection: &logical.Connection{RemoteAddr: "127.0.0.1"}, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Auth == nil { t.Fatal("expected login to succeed") } @@ -61,7 +66,7 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { } // Override with a secret-id value, verify it doesn't pass - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole", Operation: logical.UpdateOperation, Data: map[string]interface{}{ @@ -69,6 +74,9 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { }, Storage: s, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleSecretIDReq := &logical.Request{ Operation: logical.UpdateOperation, @@ -84,11 +92,13 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { } roleSecretIDReq.Data["token_bound_cidrs"] = "10.0.0.0/24" - resp = b.requestNoErr(t, roleSecretIDReq) - + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } secretID := resp.Data["secret_id"] - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "login", Operation: logical.UpdateOperation, Data: map[string]interface{}{ @@ -98,7 +108,9 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { Storage: s, Connection: &logical.Connection{RemoteAddr: "127.0.0.1"}, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Auth == nil { t.Fatal("expected login to succeed") } @@ -121,8 +133,10 @@ func TestAppRole_RoleLogin(t *testing.T) { Path: "role/role1/role-id", Storage: 
storage, } - resp = b.requestNoErr(t, roleRoleIDReq) - + resp, err = b.HandleRequest(context.Background(), roleRoleIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleID := resp.Data["role_id"] roleSecretIDReq := &logical.Request{ @@ -130,8 +144,10 @@ func TestAppRole_RoleLogin(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - resp = b.requestNoErr(t, roleSecretIDReq) - + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } secretID := resp.Data["secret_id"] loginData := map[string]interface{}{ @@ -200,15 +216,20 @@ func TestAppRole_RoleLogin(t *testing.T) { Storage: storage, Data: roleData, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleRoleIDReq = &logical.Request{ Operation: logical.ReadOperation, Path: "role/role-period/role-id", Storage: storage, } - resp = b.requestNoErr(t, roleRoleIDReq) - + resp, err = b.HandleRequest(context.Background(), roleRoleIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleID = resp.Data["role_id"] roleSecretIDReq = &logical.Request{ @@ -216,8 +237,10 @@ func TestAppRole_RoleLogin(t *testing.T) { Path: "role/role-period/secret-id", Storage: storage, } - resp = b.requestNoErr(t, roleSecretIDReq) - + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } secretID = resp.Data["secret_id"] loginData["role_id"] = roleID @@ -280,6 +303,8 @@ func generateRenewRequest(s logical.Storage, auth *logical.Auth) *logical.Reques } func TestAppRole_RoleResolve(t *testing.T) { + var resp *logical.Response + var err error b, storage := 
createBackendWithStorage(t) role := "role1" @@ -289,8 +314,10 @@ func TestAppRole_RoleResolve(t *testing.T) { Path: "role/role1/role-id", Storage: storage, } - resp := b.requestNoErr(t, roleRoleIDReq) - + resp, err = b.HandleRequest(context.Background(), roleRoleIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleID := resp.Data["role_id"] roleSecretIDReq := &logical.Request{ @@ -298,8 +325,10 @@ func TestAppRole_RoleResolve(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - resp = b.requestNoErr(t, roleSecretIDReq) - + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } secretID := resp.Data["secret_id"] loginData := map[string]interface{}{ @@ -316,7 +345,10 @@ func TestAppRole_RoleResolve(t *testing.T) { }, } - resp = b.requestNoErr(t, loginReq) + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["role"] != role { t.Fatalf("Role was not as expected. Expected %s, received %s", role, resp.Data["role"]) diff --git a/builtin/credential/approle/path_role.go b/builtin/credential/approle/path_role.go index 112d2e0f13d97..eb9c65d23e7b3 100644 --- a/builtin/credential/approle/path_role.go +++ b/builtin/credential/approle/path_role.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package approle import ( @@ -111,27 +108,12 @@ type roleIDStorageEntry struct { func rolePaths(b *backend) []*framework.Path { defTokenFields := tokenutil.TokenFields() - responseOK := map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - }}, - } - responseNoContent := map[int][]framework.Response{ - http.StatusNoContent: {{ - Description: "No Content", - }}, - } - p := &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role_name"), - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "role", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "bind_secret_id": { Type: framework.TypeBool, @@ -187,112 +169,11 @@ can only be set during role creation and once set, it can't be reset later.`, }, }, ExistenceCheck: b.pathRoleExistenceCheck, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.CreateOperation: &framework.PathOperation{ - Callback: b.pathRoleCreateUpdate, - Responses: responseOK, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleCreateUpdate, - Responses: responseOK, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "bind_secret_id": { - Type: framework.TypeBool, - Required: true, - Description: "Impose secret ID to be presented when logging in using this role.", - }, - "secret_id_bound_cidrs": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: "Comma separated string or list of CIDR blocks. 
If set, specifies the blocks of IP addresses which can perform the login operation.", - }, - "secret_id_num_uses": { - Type: framework.TypeInt, - Required: true, - Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", - }, - "secret_id_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "Duration in seconds after which the issued secret ID expires.", - }, - "local_secret_ids": { - Type: framework.TypeBool, - Required: true, - Description: "If true, the secret identifiers generated using this role will be cluster local. This can only be set during role creation and once set, it can't be reset later", - }, - "token_bound_cidrs": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, - }, - "token_explicit_max_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.", - }, - "token_max_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "The maximum lifetime of the generated token", - }, - "token_no_default_policy": { - Type: framework.TypeBool, - Required: true, - Description: "If true, the 'default' policy will not automatically be added to generated tokens", - }, - "token_period": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value.", - }, - "token_policies": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: "Comma-separated list of policies", - }, - "token_type": { - Type: framework.TypeString, - Required: true, - Default: "default-service", - Description: "The type of token to generate, service or batch", - }, - "token_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "The initial ttl of the token to generate", - }, - "token_num_uses": { - Type: framework.TypeInt, - Required: true, - Description: "The maximum number of times a token may be used, a value of zero means unlimited", - }, - "period": { - Type: framework.TypeDurationSecond, - Required: false, - Description: tokenutil.DeprecationText("token_period"), - Deprecated: true, - }, - "policies": { - Type: framework.TypeCommaStringSlice, - Required: false, - Description: tokenutil.DeprecationText("token_policies"), - Deprecated: true, - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.CreateOperation: b.pathRoleCreateUpdate, + logical.UpdateOperation: b.pathRoleCreateUpdate, + logical.ReadOperation: 
b.pathRoleRead, + logical.DeleteOperation: b.pathRoleDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role"][0]), HelpDescription: strings.TrimSpace(roleHelp["role"][1]), @@ -304,71 +185,32 @@ can only be set during role creation and once set, it can't be reset later.`, p, { Pattern: "role/?", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "roles", - }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ListOperation: &framework.PathOperation{ - Callback: b.pathRoleList, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Required: true, - }, - }, - }}, - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-list"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/local-secret-ids$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "local-secret-ids", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleLocalSecretIDsRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "local_secret_ids": { - Type: framework.TypeBool, - Required: true, - Description: "If true, the secret identifiers generated using this role will be cluster local. 
This can only be set during role creation and once set, it can't be reset later", - }, - }, - }}, - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleLocalSecretIDsRead, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-local-secret-ids"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-local-secret-ids"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/policies$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "policies", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "policies": { Type: framework.TypeCommaStringSlice, @@ -380,50 +222,20 @@ can only be set during role creation and once set, it can't be reset later.`, Description: defTokenFields["token_policies"].Description, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRolePoliciesUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRolePoliciesRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "policies": { - Type: framework.TypeCommaStringSlice, - Required: false, - Description: tokenutil.DeprecationText("token_policies"), - Deprecated: true, - }, - "token_policies": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: defTokenFields["token_policies"].Description, - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRolePoliciesDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRolePoliciesUpdate, + 
logical.ReadOperation: b.pathRolePoliciesRead, + logical.DeleteOperation: b.pathRolePoliciesDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-policies"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-policies"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bound-cidr-list$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "bound-cidr-list", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "bound_cidr_list": { Type: framework.TypeCommaStringSlice, @@ -431,45 +243,20 @@ can only be set during role creation and once set, it can't be reset later.`, of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.`, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleBoundCIDRUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleBoundCIDRListRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "bound_cidr_list": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `Deprecated: Please use "secret_id_bound_cidrs" instead. Comma separated string or list of CIDR blocks. 
If set, specifies the blocks of IP addresses which can perform the login operation.`, - Deprecated: true, - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleBoundCIDRListDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleBoundCIDRUpdate, + logical.ReadOperation: b.pathRoleBoundCIDRListRead, + logical.DeleteOperation: b.pathRoleBoundCIDRListDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-bound-cidr-list"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-bound-cidr-list"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-bound-cidrs$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "secret-id-bound-cidrs", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "secret_id_bound_cidrs": { Type: framework.TypeCommaStringSlice, @@ -477,88 +264,40 @@ of CIDR blocks. If set, specifies the blocks of IP addresses which can perform t IP addresses which can perform the login operation.`, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDBoundCIDRUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDBoundCIDRRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "secret_id_bound_cidrs": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `Comma separated string or list of CIDR blocks. 
If set, specifies the blocks of IP addresses which can perform the login operation.`, - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDBoundCIDRDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleSecretIDBoundCIDRUpdate, + logical.ReadOperation: b.pathRoleSecretIDBoundCIDRRead, + logical.DeleteOperation: b.pathRoleSecretIDBoundCIDRDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["secret-id-bound-cidrs"][0]), HelpDescription: strings.TrimSpace(roleHelp["secret-id-bound-cidrs"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-bound-cidrs$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "token-bound-cidrs", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: defTokenFields["token_bound_cidrs"].Description, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenBoundCIDRUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenBoundCIDRRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "token_bound_cidrs": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. 
Should be a subset of the token CIDR blocks listed on the role, if any.`, - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenBoundCIDRDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleTokenBoundCIDRUpdate, + logical.ReadOperation: b.pathRoleTokenBoundCIDRRead, + logical.DeleteOperation: b.pathRoleTokenBoundCIDRDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["token-bound-cidrs"][0]), HelpDescription: strings.TrimSpace(roleHelp["token-bound-cidrs"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bind-secret-id$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "bind-secret-id", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "bind_secret_id": { Type: framework.TypeBool, @@ -566,88 +305,40 @@ IP addresses which can perform the login operation.`, Description: "Impose secret_id to be presented when logging in using this role.", }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleBindSecretIDUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleBindSecretIDRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "bind_secret_id": { - Type: framework.TypeBool, - Required: true, - Description: "Impose secret_id to be presented when logging in using this role. 
Defaults to 'true'.", - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleBindSecretIDDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleBindSecretIDUpdate, + logical.ReadOperation: b.pathRoleBindSecretIDRead, + logical.DeleteOperation: b.pathRoleBindSecretIDDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-bind-secret-id"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-bind-secret-id"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-num-uses$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "secret-id-num-uses", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "secret_id_num_uses": { Type: framework.TypeInt, Description: "Number of times a SecretID can access the role, after which the SecretID will expire.", }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDNumUsesUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDNumUsesRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "secret_id_num_uses": { - Type: framework.TypeInt, - Required: true, - Description: "Number of times a secret ID can access the role, after which the SecretID will expire. 
Defaults to 0 meaning that the secret ID is of unlimited use.", - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDNumUsesDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleSecretIDNumUsesUpdate, + logical.ReadOperation: b.pathRoleSecretIDNumUsesRead, + logical.DeleteOperation: b.pathRoleSecretIDNumUsesDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-ttl$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "secret-id-ttl", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "secret_id_ttl": { Type: framework.TypeDurationSecond, @@ -655,44 +346,20 @@ IP addresses which can perform the login operation.`, to 0, meaning no expiration.`, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDTTLUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDTTLRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "secret_id_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "Duration in seconds after which the issued secret ID should expire. 
Defaults to 0, meaning no expiration.", - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDTTLDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleSecretIDTTLUpdate, + logical.ReadOperation: b.pathRoleSecretIDTTLRead, + logical.DeleteOperation: b.pathRoleSecretIDTTLDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-ttl"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-ttl"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/period$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "period", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "period": { Type: framework.TypeDurationSecond, @@ -704,222 +371,99 @@ to 0, meaning no expiration.`, Description: defTokenFields["token_period"].Description, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRolePeriodUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRolePeriodRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "period": { - Type: framework.TypeDurationSecond, - Required: false, - Description: tokenutil.DeprecationText("token_period"), - Deprecated: true, - }, - "token_period": { - Type: framework.TypeDurationSecond, - Required: true, - Description: defTokenFields["token_period"].Description, - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRolePeriodDelete, - Responses: responseNoContent, - }, + 
Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRolePeriodUpdate, + logical.ReadOperation: b.pathRolePeriodRead, + logical.DeleteOperation: b.pathRolePeriodDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-period"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-period"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-num-uses$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "token-num-uses", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "token_num_uses": { Type: framework.TypeInt, Description: defTokenFields["token_num_uses"].Description, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenNumUsesUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenNumUsesRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "token_num_uses": { - Type: framework.TypeInt, - Required: true, - Description: defTokenFields["token_num_uses"].Description, - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenNumUsesDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleTokenNumUsesUpdate, + logical.ReadOperation: b.pathRoleTokenNumUsesRead, + logical.DeleteOperation: b.pathRoleTokenNumUsesDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-token-num-uses"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-token-num-uses"][1]), }, { Pattern: "role/" + 
framework.GenericNameRegex("role_name") + "/token-ttl$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "token-ttl", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "token_ttl": { Type: framework.TypeDurationSecond, Description: defTokenFields["token_ttl"].Description, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenTTLUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenTTLRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "token_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: defTokenFields["token_ttl"].Description, - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenTTLDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleTokenTTLUpdate, + logical.ReadOperation: b.pathRoleTokenTTLRead, + logical.DeleteOperation: b.pathRoleTokenTTLDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-token-ttl"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-token-ttl"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-max-ttl$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "token-max-ttl", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. 
Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "token_max_ttl": { Type: framework.TypeDurationSecond, Description: defTokenFields["token_max_ttl"].Description, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenMaxTTLUpdate, - Responses: responseNoContent, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenMaxTTLRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "token_max_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: defTokenFields["token_max_ttl"].Description, - }, - }, - }}, - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleTokenMaxTTLDelete, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleTokenMaxTTLUpdate, + logical.ReadOperation: b.pathRoleTokenMaxTTLRead, + logical.DeleteOperation: b.pathRoleTokenMaxTTLDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-token-max-ttl"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-token-max-ttl"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/role-id$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "role-id", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "role_id": { Type: framework.TypeString, Description: "Identifier of the role. 
Defaults to a UUID.", }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRoleRoleIDRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "role_id": { - Type: framework.TypeString, - Required: false, - Description: "Identifier of the role. Defaults to a UUID.", - }, - }, - }}, - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleRoleIDUpdate, - Responses: responseNoContent, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleRoleIDRead, + logical.UpdateOperation: b.pathRoleRoleIDUpdate, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-id"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-id"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "secret-id", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "metadata": { Type: framework.TypeString, @@ -948,284 +492,93 @@ Overrides secret_id_num_uses role option when supplied. May not be higher than r Overrides secret_id_ttl role option when supplied. 
May not be longer than role's secret_id_ttl.`, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDUpdate, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "secret_id": { - Type: framework.TypeString, - Required: true, - Description: "Secret ID attached to the role.", - }, - "secret_id_accessor": { - Type: framework.TypeString, - Required: true, - Description: "Accessor of the secret ID", - }, - "secret_id_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "Duration in seconds after which the issued secret ID expires.", - }, - "secret_id_num_uses": { - Type: framework.TypeInt, - Required: true, - Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", - }, - }, - }}, - }, - }, - logical.ListOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDList, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "secret-ids", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Required: true, - Type: framework.TypeStringSlice, - }, - }, - }}, - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleSecretIDUpdate, + logical.ListOperation: b.pathRoleSecretIDList, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/lookup/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "secret-id", - OperationVerb: "look-up", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of 
the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "secret_id": { Type: framework.TypeString, Description: "SecretID attached to the role.", }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDLookupUpdate, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "secret_id_accessor": { - Type: framework.TypeString, - Required: true, - Description: "Accessor of the secret ID", - }, - "secret_id_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "Duration in seconds after which the issued secret ID expires.", - }, - "secret_id_num_uses": { - Type: framework.TypeInt, - Required: true, - Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", - }, - "creation_time": { - Type: framework.TypeTime, - Required: true, - }, - "expiration_time": { - Type: framework.TypeTime, - Required: true, - }, - "last_updated_time": { - Type: framework.TypeTime, - Required: true, - }, - "metadata": { - Type: framework.TypeKVPairs, - Required: true, - }, - "cidr_list": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: "List of CIDR blocks enforcing secret IDs to be used from specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the list of CIDR blocks listed here should be a subset of the CIDR blocks listed on the role.", - }, - "token_bound_cidrs": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: "List of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. 
Should be a subset of the token CIDR blocks listed on the role, if any.", - }, - }, - }}, - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleSecretIDLookupUpdate, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-lookup"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-lookup"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/destroy/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationVerb: "destroy", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "secret_id": { Type: framework.TypeString, Description: "SecretID attached to the role.", }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDDestroyUpdateDelete, - Responses: responseNoContent, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "secret-id", - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDDestroyUpdateDelete, - Responses: responseNoContent, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "secret-id2", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleSecretIDDestroyUpdateDelete, + logical.DeleteOperation: b.pathRoleSecretIDDestroyUpdateDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-destroy"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-destroy"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/lookup/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "secret-id-by-accessor", - 
OperationVerb: "look-up", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "secret_id_accessor": { Type: framework.TypeString, Description: "Accessor of the SecretID", }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDAccessorLookupUpdate, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "secret_id_accessor": { - Type: framework.TypeString, - Required: true, - Description: "Accessor of the secret ID", - }, - "secret_id_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "Duration in seconds after which the issued secret ID expires.", - }, - "secret_id_num_uses": { - Type: framework.TypeInt, - Required: true, - Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", - }, - "creation_time": { - Type: framework.TypeTime, - Required: true, - }, - "expiration_time": { - Type: framework.TypeTime, - Required: true, - }, - "last_updated_time": { - Type: framework.TypeTime, - Required: true, - }, - "metadata": { - Type: framework.TypeKVPairs, - Required: true, - }, - "cidr_list": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: "List of CIDR blocks enforcing secret IDs to be used from specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the list of CIDR blocks listed here should be a subset of the CIDR blocks listed on the role.", - }, - "token_bound_cidrs": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: "List of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. 
Should be a subset of the token CIDR blocks listed on the role, if any.", - }, - }, - }}, - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleSecretIDAccessorLookupUpdate, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/destroy/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationVerb: "destroy", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "secret_id_accessor": { Type: framework.TypeString, Description: "Accessor of the SecretID", }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDAccessorDestroyUpdateDelete, - Responses: responseNoContent, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "secret-id-by-accessor", - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathRoleSecretIDAccessorDestroyUpdateDelete, - Responses: responseNoContent, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "secret-id-by-accessor2", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleSecretIDAccessorDestroyUpdateDelete, + logical.DeleteOperation: b.pathRoleSecretIDAccessorDestroyUpdateDelete, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]), }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/custom-secret-id$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: 
operationPrefixAppRole, - OperationSuffix: "custom-secret-id", - }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, - Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + Description: "Name of the role.", }, "secret_id": { Type: framework.TypeString, @@ -1259,37 +612,8 @@ Overrides secret_id_num_uses role option when supplied. May not be higher than r Overrides secret_id_ttl role option when supplied. May not be longer than role's secret_id_ttl.`, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRoleCustomSecretIDUpdate, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "secret_id": { - Type: framework.TypeString, - Required: true, - Description: "Secret ID attached to the role.", - }, - "secret_id_accessor": { - Type: framework.TypeString, - Required: true, - Description: "Accessor of the secret ID", - }, - "secret_id_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: "Duration in seconds after which the issued secret ID expires.", - }, - "secret_id_num_uses": { - Type: framework.TypeInt, - Required: true, - Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", - }, - }, - }}, - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRoleCustomSecretIDUpdate, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-custom-secret-id"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-custom-secret-id"][1]), @@ -1556,10 +880,6 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request return logical.ErrorResponse("missing role_name"), nil } - if len(roleName) > maxHmacInputLength { - return logical.ErrorResponse(fmt.Sprintf("role_name is longer than maximum of %d bytes", 
maxHmacInputLength)), nil - } - lock := b.roleLock(roleName) lock.Lock() defer lock.Unlock() diff --git a/builtin/credential/approle/path_role_test.go b/builtin/credential/approle/path_role_test.go index a5ea9d1d51599..136b288c9685c 100644 --- a/builtin/credential/approle/path_role_test.go +++ b/builtin/credential/approle/path_role_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package approle import ( @@ -15,23 +12,14 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/sdk/helper/policyutil" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) -func (b *backend) requestNoErr(t *testing.T, req *logical.Request) *logical.Response { - t.Helper() - resp, err := b.HandleRequest(context.Background(), req) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(req.Path), req.Operation), resp, true) - return resp -} - func TestAppRole_LocalSecretIDsRead(t *testing.T) { + var resp *logical.Response + var err error b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ @@ -39,29 +27,37 @@ func TestAppRole_LocalSecretIDsRead(t *testing.T) { "bind_secret_id": true, } - b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.CreateOperation, Path: "role/testrole", Storage: storage, Data: roleData, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } - resp := b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.ReadOperation, Storage: storage, Path: "role/testrole/local-secret-ids", }) - + if err != nil || (resp != nil && 
resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if !resp.Data["local_secret_ids"].(bool) { t.Fatalf("expected local_secret_ids to be returned") } } func TestAppRole_LocalNonLocalSecretIDs(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) // Create a role with local_secret_ids set - resp := b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole1", Operation: logical.CreateOperation, Storage: storage, @@ -71,9 +67,12 @@ func TestAppRole_LocalNonLocalSecretIDs(t *testing.T) { "local_secret_ids": true, }, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\n resp: %#v", err, resp) + } // Create another role without setting local_secret_ids - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole2", Operation: logical.CreateOperation, Storage: storage, @@ -82,43 +81,56 @@ func TestAppRole_LocalNonLocalSecretIDs(t *testing.T) { "bind_secret_id": true, }, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\n resp: %#v", err, resp) + } count := 10 // Create secret IDs on testrole1 for i := 0; i < count; i++ { - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole1/secret-id", Operation: logical.UpdateOperation, Storage: storage, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } } // Check the number of secret IDs generated - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole1/secret-id", Operation: logical.ListOperation, Storage: storage, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } if 
len(resp.Data["keys"].([]string)) != count { t.Fatalf("failed to list secret IDs") } // Create secret IDs on testrole1 for i := 0; i < count; i++ { - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole2/secret-id", Operation: logical.UpdateOperation, Storage: storage, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } } - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole2/secret-id", Operation: logical.ListOperation, Storage: storage, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } if len(resp.Data["keys"].([]string)) != count { t.Fatalf("failed to list secret IDs") } @@ -152,12 +164,14 @@ func TestAppRole_UpgradeSecretIDPrefix(t *testing.T) { } // Ensure that the API response contains local_secret_ids - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole", Operation: logical.ReadOperation, Storage: storage, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\n resp: %#v", err, resp) + } _, ok := resp.Data["local_secret_ids"] if !ok { t.Fatalf("expected local_secret_ids to be present in the response") @@ -178,12 +192,15 @@ func TestAppRole_LocalSecretIDImmutability(t *testing.T) { } // Create a role with local_secret_ids set - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole", Operation: logical.CreateOperation, Storage: storage, Data: roleData, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } // Attempt to modify local_secret_ids should fail resp, err = b.HandleRequest(context.Background(), &logical.Request{ @@ -213,19 +230,25 @@ 
func TestAppRole_UpgradeBoundCIDRList(t *testing.T) { } // Create a role with bound_cidr_list set - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole", Operation: logical.CreateOperation, Storage: storage, Data: roleData, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } // Read the role and check that the bound_cidr_list is set properly - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole", Operation: logical.ReadOperation, Storage: storage, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } expected := []string{"127.0.0.1/18", "192.178.1.2/24"} actual := resp.Data["secret_id_bound_cidrs"].([]string) @@ -249,18 +272,20 @@ func TestAppRole_UpgradeBoundCIDRList(t *testing.T) { } // Read the role. The upgrade code should have migrated the old type to the new type - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole", Operation: logical.ReadOperation, Storage: storage, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } if !reflect.DeepEqual(expected, actual) { t.Fatalf("bad: bound_cidr_list; expected: %#v\nactual: %#v\n", expected, actual) } // Create a secret-id by supplying a subset of the role's CIDR blocks with the new type - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole/secret-id", Operation: logical.UpdateOperation, Storage: storage, @@ -268,13 +293,15 @@ func TestAppRole_UpgradeBoundCIDRList(t *testing.T) { "cidr_list": []string{"127.0.0.1/24"}, }, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + 
} if resp.Data["secret_id"].(string) == "" { t.Fatalf("failed to generate secret-id") } // Check that the backwards compatibility for the string type is not broken - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrole/secret-id", Operation: logical.UpdateOperation, Storage: storage, @@ -282,7 +309,9 @@ func TestAppRole_UpgradeBoundCIDRList(t *testing.T) { "cidr_list": "127.0.0.1/24", }, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } if resp.Data["secret_id"].(string) == "" { t.Fatalf("failed to generate secret-id") } @@ -313,13 +342,15 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { Operation: logical.UpdateOperation, Storage: storage, } - resp = b.requestNoErr(t, secretIDReq) - + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } secretID = resp.Data["secret_id"].(string) roleID = "testroleid" // Regular login flow. This should succeed. 
- resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "login", Operation: logical.UpdateOperation, Storage: storage, @@ -328,11 +359,16 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { "secret_id": secretID, }, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } // Lower case the role name when generating the secret id secretIDReq.Path = "role/testrolename/secret-id" - resp = b.requestNoErr(t, secretIDReq) - + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } secretID = resp.Data["secret_id"].(string) // Login should fail @@ -345,7 +381,7 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { "secret_id": secretID, }, }) - if err != nil && err != logical.ErrInvalidCredentials { + if err != nil { t.Fatal(err) } if resp == nil || !resp.IsError() { @@ -355,11 +391,14 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { // Delete the role and create it again. This time don't directly persist // it, but route the request to the creation handler so that it sets the // LowerCaseRoleName to true. 
- resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testRoleName", Operation: logical.DeleteOperation, Storage: storage, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } roleReq := &logical.Request{ Path: "role/testRoleName", @@ -369,27 +408,34 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { "bind_secret_id": true, }, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } // Create secret id with lower cased role name - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrolename/secret-id", Operation: logical.UpdateOperation, Storage: storage, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } secretID = resp.Data["secret_id"].(string) - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrolename/role-id", Operation: logical.ReadOperation, Storage: storage, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } roleID = resp.Data["role_id"].(string) // Login should pass - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "login", Operation: logical.UpdateOperation, Storage: storage, @@ -398,9 +444,12 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { "secret_id": secretID, }, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%v", resp, err) + } // Lookup of secret ID should work in case-insensitive manner - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), 
&logical.Request{ Path: "role/testrolename/secret-id/lookup", Operation: logical.UpdateOperation, Storage: storage, @@ -408,17 +457,22 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { "secret_id": secretID, }, }) - + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } if resp == nil { t.Fatalf("failed to lookup secret IDs") } // Listing of secret IDs should work in case-insensitive manner - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Path: "role/testrolename/secret-id", Operation: logical.ListOperation, Storage: storage, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } if len(resp.Data["keys"].([]string)) != 1 { t.Fatalf("failed to list secret IDs") @@ -441,7 +495,10 @@ func TestAppRole_RoleReadSetIndex(t *testing.T) { } // Create a role - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) + } roleIDReq := &logical.Request{ Path: "role/testrole/role-id", @@ -450,8 +507,10 @@ func TestAppRole_RoleReadSetIndex(t *testing.T) { } // Get the role ID - resp = b.requestNoErr(t, roleIDReq) - + resp, err = b.HandleRequest(context.Background(), roleIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) + } roleID := resp.Data["role_id"].(string) // Delete the role ID index @@ -462,7 +521,10 @@ func TestAppRole_RoleReadSetIndex(t *testing.T) { // Read the role again. 
This should add the index and return a warning roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) + } // Check if the warning is being returned if !strings.Contains(resp.Warnings[0], "Role identifier was missing an index back to role name.") { @@ -487,10 +549,15 @@ func TestAppRole_RoleReadSetIndex(t *testing.T) { // Check if updating and reading of roles work and that there are no lock // contentions dangling due to previous operation - resp = b.requestNoErr(t, roleReq) - + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) + } } func TestAppRole_CIDRSubset(t *testing.T) { @@ -512,7 +579,10 @@ func TestAppRole_CIDRSubset(t *testing.T) { Data: roleData, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err: %v resp: %#v", err, resp) + } secretIDData := map[string]interface{}{ "cidr_list": "127.0.0.1/16", @@ -534,10 +604,19 @@ func TestAppRole_CIDRSubset(t *testing.T) { roleData["bound_cidr_list"] = "192.168.27.29/16,172.245.30.40/24,10.20.30.40/30" roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err: %v resp: %#v", err, resp) + } secretIDData["cidr_list"] = "192.168.27.29/20,172.245.30.40/25,10.20.30.40/32" - resp = b.requestNoErr(t, secretIDReq) + resp, 
err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("resp: %#v", resp) + } } func TestAppRole_TokenBoundCIDRSubset32Mask(t *testing.T) { @@ -559,7 +638,10 @@ func TestAppRole_TokenBoundCIDRSubset32Mask(t *testing.T) { Data: roleData, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err: %v resp: %#v", err, resp) + } secretIDData := map[string]interface{}{ "token_bound_cidrs": "127.0.0.1/32", @@ -571,7 +653,10 @@ func TestAppRole_TokenBoundCIDRSubset32Mask(t *testing.T) { Data: secretIDData, } - resp = b.requestNoErr(t, secretIDReq) + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil { + t.Fatalf("err: %v resp: %#v", err, resp) + } secretIDData = map[string]interface{}{ "token_bound_cidrs": "127.0.0.1/24", @@ -611,13 +696,19 @@ func TestAppRole_RoleConstraints(t *testing.T) { } // Set bind_secret_id, which is enabled by default - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } // Set bound_cidr_list alone by explicitly disabling bind_secret_id roleReq.Operation = logical.UpdateOperation roleData["bind_secret_id"] = false roleData["bound_cidr_list"] = "0.0.0.0/0" - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } // Remove both constraints roleReq.Operation = logical.UpdateOperation @@ -634,6 +725,7 @@ func TestAppRole_RoleConstraints(t *testing.T) { func TestAppRole_RoleIDUpdate(t *testing.T) { var resp *logical.Response + var err error b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ @@ -650,7 +742,10 @@ func 
TestAppRole_RoleIDUpdate(t *testing.T) { Storage: storage, Data: roleData, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleIDUpdateReq := &logical.Request{ Operation: logical.UpdateOperation, @@ -660,15 +755,20 @@ func TestAppRole_RoleIDUpdate(t *testing.T) { "role_id": "customroleid", }, } - resp = b.requestNoErr(t, roleIDUpdateReq) + resp, err = b.HandleRequest(context.Background(), roleIDUpdateReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } secretIDReq := &logical.Request{ Operation: logical.UpdateOperation, Storage: storage, Path: "role/testrole1/secret-id", } - resp = b.requestNoErr(t, secretIDReq) - + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } secretID := resp.Data["secret_id"].(string) loginData := map[string]interface{}{ @@ -684,7 +784,10 @@ func TestAppRole_RoleIDUpdate(t *testing.T) { RemoteAddr: "127.0.0.1", }, } - resp = b.requestNoErr(t, loginReq) + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Auth == nil { t.Fatalf("expected a non-nil auth object in the response") @@ -711,7 +814,10 @@ func TestAppRole_RoleIDUniqueness(t *testing.T) { Data: roleData, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Path = "role/testrole2" resp, err = b.HandleRequest(context.Background(), roleReq) @@ -720,7 +826,10 @@ func TestAppRole_RoleIDUniqueness(t *testing.T) { } roleData["role_id"] = "role-id-456" - resp = b.requestNoErr(t, roleReq) + resp, err = 
b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.UpdateOperation roleData["role_id"] = "role-id-123" @@ -758,15 +867,22 @@ func TestAppRole_RoleIDUniqueness(t *testing.T) { } roleIDData["role_id"] = "role-id-2000" - resp = b.requestNoErr(t, roleIDReq) + resp, err = b.HandleRequest(context.Background(), roleIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleIDData["role_id"] = "role-id-1000" roleIDReq.Path = "role/testrole1/role-id" - resp = b.requestNoErr(t, roleIDReq) + resp, err = b.HandleRequest(context.Background(), roleIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } } func TestAppRole_RoleDeleteSecretID(t *testing.T) { var resp *logical.Response + var err error b, storage := createBackendWithStorage(t) createRole(t, b, storage, "role1", "a,b") @@ -776,17 +892,28 @@ func TestAppRole_RoleDeleteSecretID(t *testing.T) { Path: "role/role1/secret-id", } // Create 3 secrets on the role - resp = b.requestNoErr(t, secretIDReq) - resp = b.requestNoErr(t, secretIDReq) - resp = b.requestNoErr(t, secretIDReq) + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } listReq := &logical.Request{ Operation: logical.ListOperation, Storage: storage, Path: "role/role1/secret-id", } - resp = b.requestNoErr(t, listReq) - + resp, err = b.HandleRequest(context.Background(), listReq) + if err != nil || (resp != nil && 
resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } secretIDAccessors := resp.Data["keys"].([]string) if len(secretIDAccessors) != 3 { t.Fatalf("bad: len of secretIDAccessors: expected:3 actual:%d", len(secretIDAccessors)) @@ -797,9 +924,11 @@ func TestAppRole_RoleDeleteSecretID(t *testing.T) { Storage: storage, Path: "role/role1", } - resp = b.requestNoErr(t, roleReq) - - resp, err := b.HandleRequest(context.Background(), listReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + resp, err = b.HandleRequest(context.Background(), listReq) if err != nil || resp == nil || (resp != nil && !resp.IsError()) { t.Fatalf("expected an error. err:%v resp:%#v", err, resp) } @@ -816,7 +945,10 @@ func TestAppRole_RoleSecretIDReadDelete(t *testing.T) { Storage: storage, Path: "role/role1/secret-id", } - resp = b.requestNoErr(t, secretIDCreateReq) + resp, err = b.HandleRequest(context.Background(), secretIDCreateReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } secretID := resp.Data["secret_id"].(string) if secretID == "" { @@ -831,8 +963,10 @@ func TestAppRole_RoleSecretIDReadDelete(t *testing.T) { "secret_id": secretID, }, } - resp = b.requestNoErr(t, secretIDReq) - + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data == nil { t.Fatal(err) } @@ -845,7 +979,11 @@ func TestAppRole_RoleSecretIDReadDelete(t *testing.T) { "secret_id": secretID, }, } - resp = b.requestNoErr(t, deleteSecretIDReq) + resp, err = b.HandleRequest(context.Background(), deleteSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + resp, err = b.HandleRequest(context.Background(), secretIDReq) if resp != nil && resp.IsError() { t.Fatalf("error response:%#v", 
resp) @@ -866,15 +1004,20 @@ func TestAppRole_RoleSecretIDAccessorReadDelete(t *testing.T) { Storage: storage, Path: "role/role1/secret-id", } - resp = b.requestNoErr(t, secretIDReq) + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } listReq := &logical.Request{ Operation: logical.ListOperation, Storage: storage, Path: "role/role1/secret-id", } - resp = b.requestNoErr(t, listReq) - + resp, err = b.HandleRequest(context.Background(), listReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } hmacSecretID := resp.Data["keys"].([]string)[0] hmacReq := &logical.Request{ @@ -885,14 +1028,19 @@ func TestAppRole_RoleSecretIDAccessorReadDelete(t *testing.T) { "secret_id_accessor": hmacSecretID, }, } - resp = b.requestNoErr(t, hmacReq) - + resp, err = b.HandleRequest(context.Background(), hmacReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data == nil { t.Fatal(err) } hmacReq.Path = "role/role1/secret-id-accessor/destroy" - resp = b.requestNoErr(t, hmacReq) + resp, err = b.HandleRequest(context.Background(), hmacReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } hmacReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), hmacReq) @@ -934,6 +1082,7 @@ func TestAppRoleSecretIDLookup(t *testing.T) { func TestAppRoleRoleListSecretID(t *testing.T) { var resp *logical.Response + var err error b, storage := createBackendWithStorage(t) createRole(t, b, storage, "role1", "a,b") @@ -944,19 +1093,36 @@ func TestAppRoleRoleListSecretID(t *testing.T) { Path: "role/role1/secret-id", } // Create 5 'secret_id's - resp = b.requestNoErr(t, secretIDReq) - resp = b.requestNoErr(t, secretIDReq) - resp = b.requestNoErr(t, secretIDReq) - resp = b.requestNoErr(t, secretIDReq) - resp = 
b.requestNoErr(t, secretIDReq) + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } listReq := &logical.Request{ Operation: logical.ListOperation, Storage: storage, Path: "role/role1/secret-id/", } - resp = b.requestNoErr(t, listReq) - + resp, err = b.HandleRequest(context.Background(), listReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } secrets := resp.Data["keys"].([]string) if len(secrets) != 5 { t.Fatalf("bad: len of secrets: expected:5 actual:%d", len(secrets)) @@ -965,6 +1131,7 @@ func TestAppRoleRoleListSecretID(t *testing.T) { func TestAppRole_RoleList(t *testing.T) { var resp *logical.Response + var err error b, storage := createBackendWithStorage(t) createRole(t, b, storage, "role1", "a,b") @@ -978,7 +1145,10 @@ func TestAppRole_RoleList(t *testing.T) { Path: "role", Storage: storage, } - resp = b.requestNoErr(t, listReq) + resp, err = b.HandleRequest(context.Background(), listReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } actual := resp.Data["keys"].([]string) expected := []string{"role1", "role2", "role3", "role4", "role5"} @@ -989,6 +1159,7 @@ func TestAppRole_RoleList(t *testing.T) { func 
TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { var resp *logical.Response + var err error b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ @@ -1005,14 +1176,20 @@ func TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { Data: roleData, } - resp = b.requestNoErr(t, roleReq) - - roleSecretIDReq := &logical.Request{ + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + roleSecretIDReq := &logical.Request{ Operation: logical.UpdateOperation, Path: "role/role1/secret-id", Storage: storage, } - resp = b.requestNoErr(t, roleSecretIDReq) + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id"].(string) == "" { t.Fatalf("failed to generate secret_id") @@ -1029,7 +1206,10 @@ func TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { "secret_id": "abcd123", } roleSecretIDReq.Data = roleCustomSecretIDData - resp = b.requestNoErr(t, roleSecretIDReq) + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id"] != "abcd123" { t.Fatalf("failed to set specific secret_id to role") @@ -1049,6 +1229,7 @@ func TestAppRole_RoleSecretIDWithValidFields(t *testing.T) { } var resp *logical.Response + var err error b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ @@ -1065,7 +1246,10 @@ func TestAppRole_RoleSecretIDWithValidFields(t *testing.T) { Data: roleData, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } testCases := []testCase{ { @@ -1096,7 +1280,10 @@ func TestAppRole_RoleSecretIDWithValidFields(t 
*testing.T) { roleCustomSecretIDData := tc.payload roleSecretIDReq.Data = roleCustomSecretIDData - resp = b.requestNoErr(t, roleSecretIDReq) + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id"].(string) == "" { t.Fatalf("failed to generate secret_id") @@ -1110,7 +1297,10 @@ func TestAppRole_RoleSecretIDWithValidFields(t *testing.T) { roleSecretIDReq.Path = "role/role1/custom-secret-id" roleSecretIDReq.Data = roleCustomSecretIDData - resp = b.requestNoErr(t, roleSecretIDReq) + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id"] != tc.payload["secret_id"] { t.Fatalf("failed to set specific secret_id to role") @@ -1226,7 +1416,10 @@ func TestAppRole_ErrorsRoleSecretIDWithInvalidFields(t *testing.T) { Data: roleData, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } for _, tc := range rc.cases { t.Run(fmt.Sprintf("%s/%s", rc.name, tc.name), func(t *testing.T) { @@ -1278,10 +1471,16 @@ func TestAppRole_RoleCRUD(t *testing.T) { Data: roleData, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } expected := map[string]interface{}{ "bind_secret_id": true, @@ -1324,10 +1523,16 @@ func TestAppRole_RoleCRUD(t *testing.T) { roleReq.Data = roleData roleReq.Operation = 
logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } expected = map[string]interface{}{ "policies": []string{"a", "b", "c", "d"}, @@ -1353,19 +1558,26 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RU for role_id field roleReq.Path = "role/role1/role-id" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) - + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["role_id"].(string) != "test_role_id" { t.Fatalf("bad: role_id: expected:test_role_id actual:%s\n", resp.Data["role_id"].(string)) } roleReq.Data = map[string]interface{}{"role_id": "custom_role_id"} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) - + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["role_id"].(string) != "custom_role_id" { t.Fatalf("bad: role_id: expected:custom_role_id actual:%s\n", resp.Data["role_id"].(string)) } @@ -1373,23 +1585,38 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for bind_secret_id field roleReq.Path = "role/role1/bind-secret-id" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = 
b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Data = map[string]interface{}{"bind_secret_id": false} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["bind_secret_id"].(bool) { t.Fatalf("bad: bind_secret_id: expected:false actual:%t\n", resp.Data["bind_secret_id"].(bool)) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if !resp.Data["bind_secret_id"].(bool) { t.Fatalf("expected the default value of 'true' to be set") @@ -1398,14 +1625,23 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for policies field roleReq.Path = "role/role1/policies" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Data = map[string]interface{}{"policies": "a1,b1,c1,d1"} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != 
nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1"}) { t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string)) @@ -1414,10 +1650,16 @@ func TestAppRole_RoleCRUD(t *testing.T) { t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string)) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } expectedPolicies := []string{} actualPolicies := resp.Data["token_policies"].([]string) @@ -1428,23 +1670,38 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for secret-id-num-uses field roleReq.Path = "role/role1/secret-id-num-uses" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Data = map[string]interface{}{"secret_id_num_uses": 200} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), 
roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id_num_uses"].(int) != 200 { t.Fatalf("bad: secret_id_num_uses: expected:200 actual:%d\n", resp.Data["secret_id_num_uses"].(int)) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id_num_uses"].(int) != 0 { t.Fatalf("expected value to be reset") @@ -1453,23 +1710,38 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for secret_id_ttl field roleReq.Path = "role/role1/secret-id-ttl" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Data = map[string]interface{}{"secret_id_ttl": 3001} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id_ttl"].(time.Duration) != 3001 { t.Fatalf("bad: secret_id_ttl: expected:3001 actual:%d\n", resp.Data["secret_id_ttl"].(time.Duration)) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = 
b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id_ttl"].(time.Duration) != 0 { t.Fatalf("expected value to be reset") @@ -1478,28 +1750,42 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for secret-id-num-uses field roleReq.Path = "role/role1/token-num-uses" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) - + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_num_uses"].(int) != 600 { t.Fatalf("bad: token_num_uses: expected:600 actual:%d\n", resp.Data["token_num_uses"].(int)) } roleReq.Data = map[string]interface{}{"token_num_uses": 60} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_num_uses"].(int) != 60 { t.Fatalf("bad: token_num_uses: expected:60 actual:%d\n", resp.Data["token_num_uses"].(int)) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, 
err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_num_uses"].(int) != 0 { t.Fatalf("expected value to be reset") @@ -1508,23 +1794,38 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for 'period' field roleReq.Path = "role/role1/period" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Data = map[string]interface{}{"period": 9001} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["period"].(time.Duration) != 9001 { t.Fatalf("bad: period: expected:9001 actual:%d\n", resp.Data["9001"].(time.Duration)) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_period"].(time.Duration) != 0 { t.Fatalf("expected value to be reset") @@ -1533,23 +1834,38 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for token_ttl field roleReq.Path = "role/role1/token-ttl" roleReq.Operation = 
logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Data = map[string]interface{}{"token_ttl": 4001} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_ttl"].(time.Duration) != 4001 { t.Fatalf("bad: token_ttl: expected:4001 actual:%d\n", resp.Data["token_ttl"].(time.Duration)) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_ttl"].(time.Duration) != 0 { t.Fatalf("expected value to be reset") @@ -1558,23 +1874,38 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for token_max_ttl field roleReq.Path = "role/role1/token-max-ttl" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Data = map[string]interface{}{"token_max_ttl": 5001} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = 
b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_max_ttl"].(time.Duration) != 5001 { t.Fatalf("bad: token_max_ttl: expected:5001 actual:%d\n", resp.Data["token_max_ttl"].(time.Duration)) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_max_ttl"].(time.Duration) != 0 { t.Fatalf("expected value to be reset") @@ -1583,7 +1914,10 @@ func TestAppRole_RoleCRUD(t *testing.T) { // Delete test for role roleReq.Path = "role/role1" roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) @@ -1618,10 +1952,16 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { Data: roleData, } - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), 
roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } expected := map[string]interface{}{ "bind_secret_id": true, @@ -1664,10 +2004,16 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { roleReq.Data = roleData roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } expected = map[string]interface{}{ "policies": []string{"a", "b", "c", "d"}, @@ -1693,8 +2039,10 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { // RUD for secret-id-bound-cidrs field roleReq.Path = "role/role1/secret-id-bound-cidrs" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) - + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id_bound_cidrs"].([]string)[0] != "127.0.0.1/32" || resp.Data["secret_id_bound_cidrs"].([]string)[1] != "127.0.0.1/16" { t.Fatalf("bad: secret_id_bound_cidrs: expected:127.0.0.1/32,127.0.0.1/16 actual:%d\n", resp.Data["secret_id_bound_cidrs"].(int)) @@ -1702,20 +2050,32 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { roleReq.Data = map[string]interface{}{"secret_id_bound_cidrs": []string{"127.0.0.1/20"}} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, 
roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["secret_id_bound_cidrs"].([]string)[0] != "127.0.0.1/20" { t.Fatalf("bad: secret_id_bound_cidrs: expected:127.0.0.1/20 actual:%s\n", resp.Data["secret_id_bound_cidrs"].([]string)[0]) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if len(resp.Data["secret_id_bound_cidrs"].([]string)) != 0 { t.Fatalf("expected value to be reset") @@ -1724,8 +2084,10 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { // RUD for token-bound-cidrs field roleReq.Path = "role/role1/token-bound-cidrs" roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) - + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0].String() != "127.0.0.1" || resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[1].String() != "127.0.0.1/16" { m, err := json.Marshal(resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)) @@ -1737,20 +2099,32 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { roleReq.Data = map[string]interface{}{"token_bound_cidrs": []string{"127.0.0.1/20"}} roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + 
t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0].String() != "127.0.0.1/20" { t.Fatalf("bad: token_bound_cidrs: expected:127.0.0.1/20 actual:%s\n", resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0]) } roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if len(resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)) != 0 { t.Fatalf("expected value to be reset") @@ -1759,13 +2133,17 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { // Delete test for role roleReq.Path = "role/role1" roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%v resp:%#v", err, resp) } + if resp != nil { t.Fatalf("expected a nil response") } @@ -1792,14 +2170,19 @@ func TestAppRole_RoleWithTokenTypeCRUD(t *testing.T) { Data: roleData, } - resp = b.requestNoErr(t, roleReq) - + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + 
t.Fatalf("err:%v resp:%#v", err, resp) + } if 0 == len(resp.Warnings) { t.Fatalf("bad:\nexpected warning in resp:%#v\n", resp.Warnings) } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } expected := map[string]interface{}{ "bind_secret_id": true, @@ -1841,14 +2224,19 @@ func TestAppRole_RoleWithTokenTypeCRUD(t *testing.T) { roleReq.Data = roleData roleReq.Operation = logical.UpdateOperation - resp = b.requestNoErr(t, roleReq) - + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } if 0 == len(resp.Warnings) { t.Fatalf("bad:\nexpected a warning in resp:%#v\n", resp.Warnings) } roleReq.Operation = logical.ReadOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } expected = map[string]interface{}{ "policies": []string{"a", "b", "c", "d"}, @@ -1875,13 +2263,17 @@ func TestAppRole_RoleWithTokenTypeCRUD(t *testing.T) { // Delete test for role roleReq.Path = "role/role1" roleReq.Operation = logical.DeleteOperation - resp = b.requestNoErr(t, roleReq) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%v resp:%#v", err, resp) } + if resp != nil { t.Fatalf("expected a nil response") } @@ -1901,7 +2293,11 @@ func createRole(t *testing.T, b *backend, s logical.Storage, roleName, policies Storage: s, Data: roleData, } - _ = b.requestNoErr(t, roleReq) + + resp, err := 
b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } } // TestAppRole_TokenutilUpgrade ensures that when we read values out that are @@ -2037,7 +2433,10 @@ func TestAppRole_SecretID_WithTTL(t *testing.T) { Storage: storage, Data: roleData, } - resp := b.requestNoErr(t, roleReq) + resp, err := b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } // Generate secret ID secretIDReq := &logical.Request{ @@ -2045,7 +2444,10 @@ func TestAppRole_SecretID_WithTTL(t *testing.T) { Path: "role/" + tt.roleName + "/secret-id", Storage: storage, } - resp = b.requestNoErr(t, secretIDReq) + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } // Extract the "ttl" value from the response data if it exists ttlRaw, okTTL := resp.Data["secret_id_ttl"] @@ -2059,7 +2461,7 @@ func TestAppRole_SecretID_WithTTL(t *testing.T) { ) respTTL, ok = ttlRaw.(int64) if !ok { - t.Fatalf("expected ttl to be an integer, got: %T", ttlRaw) + t.Fatalf("expected ttl to be an integer, got: %s", err) } // Verify secret ID response for different cases @@ -2086,30 +2488,39 @@ func TestAppRole_RoleSecretIDAccessorCrossDelete(t *testing.T) { // Create First Role createRole(t, b, storage, "role1", "a,b") - _ = b.requestNoErr(t, &logical.Request{ + _, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, Storage: storage, Path: "role/role1/secret-id", }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } // Create Second Role createRole(t, b, storage, "role2", "a,b") - _ = b.requestNoErr(t, &logical.Request{ + _, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, Storage: storage, Path: 
"role/role2/secret-id", }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } // Get role2 secretID Accessor - resp = b.requestNoErr(t, &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.ListOperation, Storage: storage, Path: "role/role2/secret-id", }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } // Read back role2 secretID Accessor information hmacSecretID := resp.Data["keys"].([]string)[0] - _ = b.requestNoErr(t, &logical.Request{ + _, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, Storage: storage, Path: "role/role2/secret-id-accessor/lookup", @@ -2117,6 +2528,9 @@ func TestAppRole_RoleSecretIDAccessorCrossDelete(t *testing.T) { "secret_id_accessor": hmacSecretID, }, }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } // Attempt to destroy role2 secretID accessor using role1 path _, err = b.HandleRequest(context.Background(), &logical.Request{ diff --git a/builtin/credential/approle/path_tidy_user_id.go b/builtin/credential/approle/path_tidy_user_id.go index b6c777b149614..b7c6fcc6b1ec8 100644 --- a/builtin/credential/approle/path_tidy_user_id.go +++ b/builtin/credential/approle/path_tidy_user_id.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package approle import ( @@ -20,21 +17,8 @@ func pathTidySecretID(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy/secret-id$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAppRole, - OperationSuffix: "secret-id", - OperationVerb: "tidy", - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathTidySecretIDUpdate, - Responses: map[int][]framework.Response{ - http.StatusAccepted: {{ - Description: http.StatusText(http.StatusAccepted), - }}, - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathTidySecretIDUpdate, }, HelpSynopsis: pathTidySecretIDSyn, diff --git a/builtin/credential/approle/path_tidy_user_id_test.go b/builtin/credential/approle/path_tidy_user_id_test.go index c03686e89cbf9..dd7dfdfd7d652 100644 --- a/builtin/credential/approle/path_tidy_user_id_test.go +++ b/builtin/credential/approle/path_tidy_user_id_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package approle import ( @@ -11,11 +8,12 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" ) func TestAppRole_TidyDanglingAccessors_Normal(t *testing.T) { + var resp *logical.Response + var err error b, storage := createBackendWithStorage(t) // Create a role @@ -27,7 +25,10 @@ func TestAppRole_TidyDanglingAccessors_Normal(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - _ = b.requestNoErr(t, roleSecretIDReq) + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } accessorHashes, err := storage.List(context.Background(), "accessor/") if err != nil { @@ -72,18 +73,12 @@ func TestAppRole_TidyDanglingAccessors_Normal(t *testing.T) { t.Fatalf("bad: len(accessorHashes); expect 3, got %d", len(accessorHashes)) } - secret, err := b.tidySecretID(context.Background(), &logical.Request{ + _, err = b.tidySecretID(context.Background(), &logical.Request{ Storage: storage, }) if err != nil { t.Fatal(err) } - schema.ValidateResponse( - t, - schema.GetResponseSchema(t, pathTidySecretID(b), logical.UpdateOperation), - secret, - true, - ) // It runs async so we give it a bit of time to run time.Sleep(10 * time.Second) @@ -98,6 +93,8 @@ func TestAppRole_TidyDanglingAccessors_Normal(t *testing.T) { } func TestAppRole_TidyDanglingAccessors_RaceTest(t *testing.T) { + var resp *logical.Response + var err error b, storage := createBackendWithStorage(t) // Create a role @@ -109,26 +106,22 @@ func TestAppRole_TidyDanglingAccessors_RaceTest(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - _ = b.requestNoErr(t, roleSecretIDReq) - + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } count := 1 wg := &sync.WaitGroup{} 
start := time.Now() for time.Now().Sub(start) < 10*time.Second { if time.Now().Sub(start) > 100*time.Millisecond && atomic.LoadUint32(b.tidySecretIDCASGuard) == 0 { - secret, err := b.tidySecretID(context.Background(), &logical.Request{ + _, err = b.tidySecretID(context.Background(), &logical.Request{ Storage: storage, }) if err != nil { t.Fatal(err) } - schema.ValidateResponse( - t, - schema.GetResponseSchema(t, pathTidySecretID(b), logical.UpdateOperation), - secret, - true, - ) } wg.Add(1) go func() { @@ -138,7 +131,10 @@ func TestAppRole_TidyDanglingAccessors_RaceTest(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - _ = b.requestNoErr(t, roleSecretIDReq) + resp, err := b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } }() entry, err := logical.StorageEntryJSON( @@ -177,12 +173,6 @@ func TestAppRole_TidyDanglingAccessors_RaceTest(t *testing.T) { if err != nil || len(secret.Warnings) > 0 { t.Fatal(err, secret.Warnings) } - schema.ValidateResponse( - t, - schema.GetResponseSchema(t, pathTidySecretID(b), logical.UpdateOperation), - secret, - true, - ) // Wait for tidy to start for atomic.LoadUint32(b.tidySecretIDCASGuard) == 0 { diff --git a/builtin/credential/approle/validation.go b/builtin/credential/approle/validation.go index 70f2194aa6783..6ff901670980a 100644 --- a/builtin/credential/approle/validation.go +++ b/builtin/credential/approle/validation.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package approle import ( @@ -104,7 +101,7 @@ func verifyCIDRRoleSecretIDSubset(secretIDCIDRs []string, roleBoundCIDRList []st return nil } -const maxHmacInputLength = 4096 +const maxHmacInputLength = 1024 // Creates a SHA256 HMAC of the given 'value' using the given 'key' and returns // a hex encoded string. 
diff --git a/builtin/credential/approle/validation_test.go b/builtin/credential/approle/validation_test.go index 7f7366b679377..ff325f4b1be95 100644 --- a/builtin/credential/approle/validation_test.go +++ b/builtin/credential/approle/validation_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package approle import ( diff --git a/builtin/credential/aws/backend.go b/builtin/credential/aws/backend.go index e8424f2c4956c..543608968396f 100644 --- a/builtin/credential/aws/backend.go +++ b/builtin/credential/aws/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -20,10 +17,7 @@ import ( cache "github.com/patrickmn/go-cache" ) -const ( - amzHeaderPrefix = "X-Amz-" - operationPrefixAWS = "aws" -) +const amzHeaderPrefix = "X-Amz-" var defaultAllowedSTSRequestHeaders = []string{ "X-Amz-Algorithm", @@ -129,9 +123,7 @@ func Backend(_ *logical.BackendConfig) (*backend, error) { deprecatedTerms: strings.NewReplacer( "accesslist", "whitelist", - "access-list", "whitelist", "denylist", "blacklist", - "deny-list", "blacklist", ), } @@ -317,7 +309,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag switch entity.Type { case "user": - userInfo, err := iamClient.GetUserWithContext(ctx, &iam.GetUserInput{UserName: &entity.FriendlyName}) + userInfo, err := iamClient.GetUser(&iam.GetUserInput{UserName: &entity.FriendlyName}) if err != nil { return "", awsutil.AppendAWSError(err) } @@ -326,7 +318,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } return *userInfo.User.UserId, nil case "role": - roleInfo, err := iamClient.GetRoleWithContext(ctx, &iam.GetRoleInput{RoleName: &entity.FriendlyName}) + roleInfo, err := iamClient.GetRole(&iam.GetRoleInput{RoleName: &entity.FriendlyName}) if err != nil { return "", awsutil.AppendAWSError(err) } @@ -335,7 +327,7 @@ func (b *backend) 
resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } return *roleInfo.Role.RoleId, nil case "instance-profile": - profileInfo, err := iamClient.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName}) + profileInfo, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName}) if err != nil { return "", awsutil.AppendAWSError(err) } @@ -348,33 +340,13 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } } -// genDeprecatedPath will return a deprecated version of a framework.Path. The -// path pattern and display attributes (if any) will contain deprecated terms, -// and the path will be marked as deprecated. +// genDeprecatedPath will return a deprecated version of a framework.Path. This will include +// using deprecated terms in the path pattern, and marking the path as deprecated. func (b *backend) genDeprecatedPath(path *framework.Path) *framework.Path { pathDeprecated := *path pathDeprecated.Pattern = b.deprecatedTerms.Replace(path.Pattern) pathDeprecated.Deprecated = true - if path.DisplayAttrs != nil { - deprecatedDisplayAttrs := *path.DisplayAttrs - deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationPrefix) - deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationVerb) - deprecatedDisplayAttrs.OperationSuffix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationSuffix) - pathDeprecated.DisplayAttrs = &deprecatedDisplayAttrs - } - - for i, op := range path.Operations { - if op.Properties().DisplayAttrs != nil { - deprecatedDisplayAttrs := *op.Properties().DisplayAttrs - deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationPrefix) - deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationVerb) - deprecatedDisplayAttrs.OperationSuffix =
b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationSuffix) - deprecatedProperties := pathDeprecated.Operations[i].(*framework.PathOperation) - deprecatedProperties.DisplayAttrs = &deprecatedDisplayAttrs - } - } - return &pathDeprecated } diff --git a/builtin/credential/aws/backend_e2e_test.go b/builtin/credential/aws/backend_e2e_test.go index e8939b9d67afd..ac2bb22f129ae 100644 --- a/builtin/credential/aws/backend_e2e_test.go +++ b/builtin/credential/aws/backend_e2e_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( diff --git a/builtin/credential/aws/backend_test.go b/builtin/credential/aws/backend_test.go index dea280c00262d..5b435d3e3a5ce 100644 --- a/builtin/credential/aws/backend_test.go +++ b/builtin/credential/aws/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -1524,7 +1521,6 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { return } - ctx := context.Background() storage := &logical.InmemStorage{} config := logical.TestBackendConfig() config.StorageView = storage @@ -1603,7 +1599,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: clientConfigData, } - _, err = b.HandleRequest(ctx, clientRequest) + _, err = b.HandleRequest(context.Background(), clientRequest) if err != nil { t.Fatal(err) } @@ -1617,7 +1613,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: configIdentityData, } - resp, err := b.HandleRequest(ctx, configIdentityRequest) + resp, err := b.HandleRequest(context.Background(), configIdentityRequest) if err != nil { t.Fatal(err) } @@ -1637,7 +1633,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: roleData, } - resp, err = b.HandleRequest(ctx, roleRequest) + resp, err = b.HandleRequest(context.Background(), roleRequest) if err != nil || (resp != nil && 
resp.IsError()) { t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err) } @@ -1654,7 +1650,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: roleDataEc2, } - resp, err = b.HandleRequest(ctx, roleRequestEc2) + resp, err = b.HandleRequest(context.Background(), roleRequestEc2) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: failed to create role; resp:%#v\nerr:%v", resp, err) } @@ -1692,7 +1688,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: loginData, } - resp, err = b.HandleRequest(ctx, loginRequest) + resp, err = b.HandleRequest(context.Background(), loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to missing header: resp:%#v\nerr:%v", resp, err) } @@ -1715,7 +1711,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: loginData, } - resp, err = b.HandleRequest(ctx, loginRequest) + resp, err = b.HandleRequest(context.Background(), loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to invalid header: resp:%#v\nerr:%v", resp, err) } @@ -1734,13 +1730,13 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: loginData, } - resp, err = b.HandleRequest(ctx, loginRequest) + resp, err = b.HandleRequest(context.Background(), loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to invalid role: resp:%#v\nerr:%v", resp, err) } loginData["role"] = "ec2only" - resp, err = b.HandleRequest(ctx, loginRequest) + resp, err = b.HandleRequest(context.Background(), loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to bad auth type: resp:%#v\nerr:%v", resp, err) } @@ -1748,7 +1744,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { // finally, the happy path test :) loginData["role"] = 
testValidRoleName - resp, err = b.HandleRequest(ctx, loginRequest) + resp, err = b.HandleRequest(context.Background(), loginRequest) if err != nil { t.Fatal(err) } @@ -1771,7 +1767,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Schema: b.pathLogin().Fields, } // ensure we can renew - resp, err = b.pathLoginRenew(ctx, renewReq, emptyLoginFd) + resp, err = b.pathLoginRenew(context.Background(), renewReq, emptyLoginFd) if err != nil { t.Fatal(err) } @@ -1789,17 +1785,17 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { // pick up the fake user ID roleData["bound_iam_principal_arn"] = entity.canonicalArn() roleRequest.Path = "role/" + testValidRoleName - resp, err = b.HandleRequest(ctx, roleRequest) + resp, err = b.HandleRequest(context.Background(), roleRequest) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: failed to recreate role: resp:%#v\nerr:%v", resp, err) } - resp, err = b.HandleRequest(ctx, loginRequest) + resp, err = b.HandleRequest(context.Background(), loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to changed AWS role ID: resp: %#v\nerr:%v", resp, err) } // and ensure a renew no longer works - resp, err = b.pathLoginRenew(ctx, renewReq, emptyLoginFd) + resp, err = b.pathLoginRenew(context.Background(), renewReq, emptyLoginFd) if err == nil || (resp != nil && !resp.IsError()) { t.Errorf("bad: expected failed renew due to changed AWS role ID: resp: %#v", resp) } @@ -1812,13 +1808,13 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { wildcardEntity.FriendlyName = "*" roleData["bound_iam_principal_arn"] = []string{wildcardEntity.canonicalArn(), "arn:aws:iam::123456789012:role/DoesNotExist/Vault_Fake_Role*"} roleRequest.Path = "role/" + wildcardRoleName - resp, err = b.HandleRequest(ctx, roleRequest) + resp, err = b.HandleRequest(context.Background(), roleRequest) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: failed to 
create wildcard roles: resp:%#v\nerr:%v", resp, err) } loginData["role"] = wildcardRoleName - resp, err = b.HandleRequest(ctx, loginRequest) + resp, err = b.HandleRequest(context.Background(), loginRequest) if err != nil { t.Fatal(err) } @@ -1827,7 +1823,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { } // and ensure we can renew renewReq = generateRenewRequest(storage, resp.Auth) - resp, err = b.pathLoginRenew(ctx, renewReq, emptyLoginFd) + resp, err = b.pathLoginRenew(context.Background(), renewReq, emptyLoginFd) if err != nil { t.Fatal(err) } @@ -1838,17 +1834,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { t.Fatalf("got error when renewing: %#v", *resp) } // ensure the cache is populated - - clientUserIDRaw, ok := resp.Auth.InternalData["client_user_id"] - if !ok { - t.Errorf("client_user_id not found in response") - } - clientUserID, ok := clientUserIDRaw.(string) - if !ok { - t.Errorf("client_user_id is not a string: %#v", clientUserIDRaw) - } - - cachedArn := b.getCachedUserId(clientUserID) + cachedArn := b.getCachedUserId(resp.Auth.Metadata["client_user_id"]) if cachedArn == "" { t.Errorf("got empty ARN back from user ID cache; expected full arn") } @@ -1857,13 +1843,13 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { period := 600 * time.Second roleData["period"] = period.String() roleRequest.Path = "role/" + testValidRoleName - resp, err = b.HandleRequest(ctx, roleRequest) + resp, err = b.HandleRequest(context.Background(), roleRequest) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: failed to create wildcard role: resp:%#v\nerr:%v", resp, err) } loginData["role"] = testValidRoleName - resp, err = b.HandleRequest(ctx, loginRequest) + resp, err = b.HandleRequest(context.Background(), loginRequest) if err != nil { t.Fatal(err) } diff --git a/builtin/credential/aws/certificates.go b/builtin/credential/aws/certificates.go index 4b97a952b6fac..c745ad2b3f243 100644 --- 
a/builtin/credential/aws/certificates.go +++ b/builtin/credential/aws/certificates.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( diff --git a/builtin/credential/aws/cli.go b/builtin/credential/aws/cli.go index a1695574f3b69..7b063fa5f42a0 100644 --- a/builtin/credential/aws/cli.go +++ b/builtin/credential/aws/cli.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( diff --git a/builtin/credential/aws/client.go b/builtin/credential/aws/client.go index 314c97ec395aa..ff8ff5c837f9b 100644 --- a/builtin/credential/aws/client.go +++ b/builtin/credential/aws/client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -122,7 +119,7 @@ func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region return nil, fmt.Errorf("could not obtain sts client: %w", err) } inputParams := &sts.GetCallerIdentityInput{} - identity, err := client.GetCallerIdentityWithContext(ctx, inputParams) + identity, err := client.GetCallerIdentity(inputParams) if err != nil { return nil, fmt.Errorf("unable to fetch current caller: %w", err) } diff --git a/builtin/credential/aws/cmd/aws/main.go b/builtin/credential/aws/cmd/aws/main.go index c7fce3e33fab9..6de96d02d1964 100644 --- a/builtin/credential/aws/cmd/aws/main.go +++ b/builtin/credential/aws/cmd/aws/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: awsauth.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/aws/path_config_certificate.go b/builtin/credential/aws/path_config_certificate.go index 36dfe3c213932..f734694781b35 100644 --- a/builtin/credential/aws/path_config_certificate.go +++ b/builtin/credential/aws/path_config_certificate.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -21,11 +18,6 @@ func (b *backend) pathListCertificates() *framework.Path { return &framework.Path{ Pattern: "config/certificates/?", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "certificate-configurations", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathCertificatesList, @@ -40,11 +32,6 @@ func (b *backend) pathListCertificates() *framework.Path { func (b *backend) pathConfigCertificate() *framework.Path { return &framework.Path{ Pattern: "config/certificate/" + framework.GenericNameRegex("cert_name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - }, - Fields: map[string]*framework.FieldSchema{ "cert_name": { Type: framework.TypeString, @@ -71,29 +58,15 @@ vary. 
Defaults to "pkcs7".`, Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "certificate", - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "certificate", - }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "certificate-configuration", - }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateDelete, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "certificate-configuration", - }, }, }, diff --git a/builtin/credential/aws/path_config_client.go b/builtin/credential/aws/path_config_client.go index 979fac11a9d8c..c609e1acd608a 100644 --- a/builtin/credential/aws/path_config_client.go +++ b/builtin/credential/aws/path_config_client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -19,11 +16,6 @@ import ( func (b *backend) pathConfigClient() *framework.Path { return &framework.Path{ Pattern: "config/client$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - }, - Fields: map[string]*framework.FieldSchema{ "access_key": { Type: framework.TypeString, @@ -85,29 +77,15 @@ func (b *backend) pathConfigClient() *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigClientCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "client", - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigClientCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "client", - }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigClientDelete, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "client-configuration", - }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigClientRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "client-configuration", - }, }, }, diff --git a/builtin/credential/aws/path_config_client_test.go b/builtin/credential/aws/path_config_client_test.go index 4c807d1b40f8c..493d20d9df000 100644 --- a/builtin/credential/aws/path_config_client_test.go +++ b/builtin/credential/aws/path_config_client_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( diff --git a/builtin/credential/aws/path_config_identity.go b/builtin/credential/aws/path_config_identity.go index 2512c9db39fd2..282d277fab547 100644 --- a/builtin/credential/aws/path_config_identity.go +++ b/builtin/credential/aws/path_config_identity.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -57,11 +54,6 @@ var ( func (b *backend) pathConfigIdentity() *framework.Path { return &framework.Path{ Pattern: "config/identity$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - }, - Fields: map[string]*framework.FieldSchema{ "iam_alias": { Type: framework.TypeString, @@ -80,16 +72,9 @@ func (b *backend) pathConfigIdentity() *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: pathConfigIdentityRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "identity-integration-configuration", - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: pathConfigIdentityUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "identity-integration", - }, }, }, diff --git a/builtin/credential/aws/path_config_identity_test.go b/builtin/credential/aws/path_config_identity_test.go index 8a7db09f2bbdc..19e919fb1179d 100644 --- a/builtin/credential/aws/path_config_identity_test.go +++ b/builtin/credential/aws/path_config_identity_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( diff --git a/builtin/credential/aws/path_config_rotate_root.go b/builtin/credential/aws/path_config_rotate_root.go index 141b7e2fbea57..125056234312b 100644 --- a/builtin/credential/aws/path_config_rotate_root.go +++ b/builtin/credential/aws/path_config_rotate_root.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -24,12 +21,6 @@ func (b *backend) pathConfigRotateRoot() *framework.Path { return &framework.Path{ Pattern: "config/rotate-root", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationVerb: "rotate", - OperationSuffix: "root-credentials", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigRotateRootUpdate, @@ -106,7 +97,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R // Get the current user's name since it's required to create an access key. // Empty input means get the current user. var getUserInput iam.GetUserInput - getUserRes, err := iamClient.GetUserWithContext(ctx, &getUserInput) + getUserRes, err := iamClient.GetUser(&getUserInput) if err != nil { return nil, fmt.Errorf("error calling GetUser: %w", err) } @@ -124,7 +115,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R createAccessKeyInput := iam.CreateAccessKeyInput{ UserName: getUserRes.User.UserName, } - createAccessKeyRes, err := iamClient.CreateAccessKeyWithContext(ctx, &createAccessKeyInput) + createAccessKeyRes, err := iamClient.CreateAccessKey(&createAccessKeyInput) if err != nil { return nil, fmt.Errorf("error calling CreateAccessKey: %w", err) } @@ -148,7 +139,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R AccessKeyId: createAccessKeyRes.AccessKey.AccessKeyId, UserName: getUserRes.User.UserName, } - if _, err := iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil { + if _, err := iamClient.DeleteAccessKey(&deleteAccessKeyInput); err != nil { // Include this error in the errs returned by this method. 
errs = multierror.Append(errs, fmt.Errorf("error deleting newly created but unstored access key ID %s: %s", *createAccessKeyRes.AccessKey.AccessKeyId, err)) } @@ -185,7 +176,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R AccessKeyId: aws.String(oldAccessKey), UserName: getUserRes.User.UserName, } - if _, err = iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil { + if _, err = iamClient.DeleteAccessKey(&deleteAccessKeyInput); err != nil { errs = multierror.Append(errs, fmt.Errorf("error deleting old access key ID %s: %w", oldAccessKey, err)) return nil, errs } diff --git a/builtin/credential/aws/path_config_rotate_root_test.go b/builtin/credential/aws/path_config_rotate_root_test.go index 3fe5b29c0479c..940c6d1022709 100644 --- a/builtin/credential/aws/path_config_rotate_root_test.go +++ b/builtin/credential/aws/path_config_rotate_root_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -8,7 +5,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam/iamiface" @@ -16,23 +12,9 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -type mockIAMClient awsutil.MockIAM - -func (m *mockIAMClient) GetUserWithContext(_ aws.Context, input *iam.GetUserInput, _ ...request.Option) (*iam.GetUserOutput, error) { - return (*awsutil.MockIAM)(m).GetUser(input) -} - -func (m *mockIAMClient) CreateAccessKeyWithContext(_ aws.Context, input *iam.CreateAccessKeyInput, _ ...request.Option) (*iam.CreateAccessKeyOutput, error) { - return (*awsutil.MockIAM)(m).CreateAccessKey(input) -} - -func (m *mockIAMClient) DeleteAccessKeyWithContext(_ aws.Context, input *iam.DeleteAccessKeyInput, _ ...request.Option) (*iam.DeleteAccessKeyOutput, error) { - return (*awsutil.MockIAM)(m).DeleteAccessKey(input) 
-} - func TestPathConfigRotateRoot(t *testing.T) { getIAMClient = func(sess *session.Session) iamiface.IAMAPI { - return &mockIAMClient{ + return &awsutil.MockIAM{ CreateAccessKeyOutput: &iam.CreateAccessKeyOutput{ AccessKey: &iam.AccessKey{ AccessKeyId: aws.String("fizz2"), diff --git a/builtin/credential/aws/path_config_sts.go b/builtin/credential/aws/path_config_sts.go index 50d986d20c84e..3666a90041793 100644 --- a/builtin/credential/aws/path_config_sts.go +++ b/builtin/credential/aws/path_config_sts.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -20,11 +17,6 @@ func (b *backend) pathListSts() *framework.Path { return &framework.Path{ Pattern: "config/sts/?", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "sts-role-relationships", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathStsList, @@ -39,12 +31,6 @@ func (b *backend) pathListSts() *framework.Path { func (b *backend) pathConfigSts() *framework.Path { return &framework.Path{ Pattern: "config/sts/" + framework.GenericNameRegex("account_id"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "sts-role", - }, - Fields: map[string]*framework.FieldSchema{ "account_id": { Type: framework.TypeString, diff --git a/builtin/credential/aws/path_config_tidy_identity_accesslist.go b/builtin/credential/aws/path_config_tidy_identity_accesslist.go index 686b0263c1aa9..f89c5ab215974 100644 --- a/builtin/credential/aws/path_config_tidy_identity_accesslist.go +++ b/builtin/credential/aws/path_config_tidy_identity_accesslist.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -18,11 +15,6 @@ const ( func (b *backend) pathConfigTidyIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("%s$", "config/tidy/identity-accesslist"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - }, - Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, @@ -42,29 +34,15 @@ expiration, before it is removed from the backend storage.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "identity-access-list-tidy-operation", - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "identity-access-list-tidy-operation", - }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "identity-access-list-tidy-settings", - }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListDelete, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "identity-access-list-tidy-settings", - }, }, }, diff --git a/builtin/credential/aws/path_config_tidy_roletag_denylist.go b/builtin/credential/aws/path_config_tidy_roletag_denylist.go index fa82b77d25f2a..e00404d7ec647 100644 --- a/builtin/credential/aws/path_config_tidy_roletag_denylist.go +++ b/builtin/credential/aws/path_config_tidy_roletag_denylist.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -17,11 +14,6 @@ const ( func (b *backend) pathConfigTidyRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "config/tidy/roletag-denylist$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - }, - Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, @@ -43,29 +35,15 @@ Defaults to 4320h (180 days).`, Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "role-tag-deny-list-tidy-operation", - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "role-tag-deny-list-tidy-operation", - }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "role-tag-deny-list-tidy-settings", - }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListDelete, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "role-tag-deny-list-tidy-settings", - }, }, }, diff --git a/builtin/credential/aws/path_identity_accesslist.go b/builtin/credential/aws/path_identity_accesslist.go index 77ec5749496df..a622b7d8f962f 100644 --- a/builtin/credential/aws/path_identity_accesslist.go +++ b/builtin/credential/aws/path_identity_accesslist.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -16,12 +13,6 @@ const identityAccessListStorage = "whitelist/identity/" func (b *backend) pathIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: "identity-accesslist/" + framework.GenericNameRegex("instance_id"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "identity-access-list", - }, - Fields: map[string]*framework.FieldSchema{ "instance_id": { Type: framework.TypeString, @@ -48,11 +39,6 @@ func (b *backend) pathListIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: "identity-accesslist/?", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "identity-access-list", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathAccessListIdentitiesList, diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go index f4041b38db839..fb8ab4f47492f 100644 --- a/builtin/credential/aws/path_login.go +++ b/builtin/credential/aws/path_login.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -55,10 +52,6 @@ var ( func (b *backend) pathLogin() *framework.Path { return &framework.Path{ Pattern: "login$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationVerb: "login", - }, Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -110,8 +103,8 @@ This must match the request body included in the signature.`, "iam_request_headers": { Type: framework.TypeHeader, Description: `Key/value pairs of headers for use in the -sts:GetCallerIdentity HTTP requests headers when auth_type is iam. Can be either -a Base64-encoded, JSON-serialized string, or a JSON object of key/value pairs. 
+sts:GetCallerIdentity HTTP requests headers when auth_type is iam. Can be either +a Base64-encoded, JSON-serialized string, or a JSON object of key/value pairs. This must at a minimum include the headers over which AWS has included a signature.`, }, "identity": { @@ -344,7 +337,7 @@ func (b *backend) pathLoginResolveRoleIam(ctx context.Context, req *logical.Requ // instanceIamRoleARN fetches the IAM role ARN associated with the given // instance profile name -func (b *backend) instanceIamRoleARN(ctx context.Context, iamClient *iam.IAM, instanceProfileName string) (string, error) { +func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName string) (string, error) { if iamClient == nil { return "", fmt.Errorf("nil iamClient") } @@ -352,7 +345,7 @@ func (b *backend) instanceIamRoleARN(ctx context.Context, iamClient *iam.IAM, in return "", fmt.Errorf("missing instance profile name") } - profile, err := iamClient.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{ + profile, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{ InstanceProfileName: aws.String(instanceProfileName), }) if err != nil { @@ -386,7 +379,7 @@ func (b *backend) validateInstance(ctx context.Context, s logical.Storage, insta return nil, err } - status, err := ec2Client.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{ + status, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{ InstanceIds: []*string{ aws.String(instanceID), }, @@ -728,7 +721,7 @@ func (b *backend) verifyInstanceMeetsRoleRequirements(ctx context.Context, } else if iamClient == nil { return nil, fmt.Errorf("received a nil iamClient") } - iamRoleARN, err := b.instanceIamRoleARN(ctx, iamClient, iamInstanceProfileEntity.FriendlyName) + iamRoleARN, err := b.instanceIamRoleARN(iamClient, iamInstanceProfileEntity.FriendlyName) if err != nil { return nil, fmt.Errorf("IAM role ARN could not be fetched: %w", err) } @@ -1839,7 +1832,7 @@ func (b *backend) fullArn(ctx 
context.Context, e *iamEntity, s logical.Storage) input := iam.GetUserInput{ UserName: aws.String(e.FriendlyName), } - resp, err := client.GetUserWithContext(ctx, &input) + resp, err := client.GetUser(&input) if err != nil { return "", fmt.Errorf("error fetching user %q: %w", e.FriendlyName, err) } @@ -1853,7 +1846,7 @@ func (b *backend) fullArn(ctx context.Context, e *iamEntity, s logical.Storage) input := iam.GetRoleInput{ RoleName: aws.String(e.FriendlyName), } - resp, err := client.GetRoleWithContext(ctx, &input) + resp, err := client.GetRole(&input) if err != nil { return "", fmt.Errorf("error fetching role %q: %w", e.FriendlyName, err) } diff --git a/builtin/credential/aws/path_login_test.go b/builtin/credential/aws/path_login_test.go index 2c0262075ad38..6ffd60ed14943 100644 --- a/builtin/credential/aws/path_login_test.go +++ b/builtin/credential/aws/path_login_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( diff --git a/builtin/credential/aws/path_role.go b/builtin/credential/aws/path_role.go index 1c9ecf2728327..12a4c7d0f2d9f 100644 --- a/builtin/credential/aws/path_role.go +++ b/builtin/credential/aws/path_role.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -23,12 +20,6 @@ var currentRoleStorageVersion = 3 func (b *backend) pathRole() *framework.Path { p := &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "auth-role", - }, - Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -87,9 +78,6 @@ auth_type is ec2 or inferred_entity_type is ec2_instance.`, given instance IDs. Can be a list or comma-separated string of EC2 instance IDs. 
This is only applicable when auth_type is ec2 or inferred_entity_type is ec2_instance.`, - DisplayAttrs: &framework.DisplayAttributes{ - Description: "If set, defines a constraint on the EC2 instances to have one of the given instance IDs. A list of EC2 instance IDs. This is only applicable when auth_type is ec2 or inferred_entity_type is ec2_instance.", - }, }, "resolve_aws_unique_ids": { Type: framework.TypeBool, @@ -211,11 +199,6 @@ func (b *backend) pathListRole() *framework.Path { return &framework.Path{ Pattern: "role/?", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "auth-roles", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, @@ -231,11 +214,6 @@ func (b *backend) pathListRoles() *framework.Path { return &framework.Path{ Pattern: "roles/?", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "auth-roles2", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, diff --git a/builtin/credential/aws/path_role_tag.go b/builtin/credential/aws/path_role_tag.go index 180b4105c69c8..15927a82a2bb6 100644 --- a/builtin/credential/aws/path_role_tag.go +++ b/builtin/credential/aws/path_role_tag.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -26,12 +23,6 @@ const roleTagVersion = "v1" func (b *backend) pathRoleTag() *framework.Path { return &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role") + "/tag$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "role-tag", - }, - Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, diff --git a/builtin/credential/aws/path_role_test.go b/builtin/credential/aws/path_role_test.go index 3a63d4cd35553..b8e824a9d27c4 100644 --- a/builtin/credential/aws/path_role_test.go +++ b/builtin/credential/aws/path_role_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( diff --git a/builtin/credential/aws/path_roletag_denylist.go b/builtin/credential/aws/path_roletag_denylist.go index 82004363530bb..19520aab2f59f 100644 --- a/builtin/credential/aws/path_roletag_denylist.go +++ b/builtin/credential/aws/path_roletag_denylist.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -15,12 +12,6 @@ import ( func (b *backend) pathRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "roletag-denylist/(?P.*)", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "role-tag-deny-list", - }, - Fields: map[string]*framework.FieldSchema{ "role_tag": { Type: framework.TypeString, @@ -51,11 +42,6 @@ func (b *backend) pathListRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "roletag-denylist/?", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "role-tag-deny-lists", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoletagDenyListsList, diff --git a/builtin/credential/aws/path_tidy_identity_accesslist.go b/builtin/credential/aws/path_tidy_identity_accesslist.go index 3b907c43d3788..9455cc0d3df1a 100644 --- a/builtin/credential/aws/path_tidy_identity_accesslist.go +++ b/builtin/credential/aws/path_tidy_identity_accesslist.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -18,13 +15,6 @@ import ( func (b *backend) pathTidyIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: "tidy/identity-accesslist$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "identity-access-list", - OperationVerb: "tidy", - }, - Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, diff --git a/builtin/credential/aws/path_tidy_roletag_denylist.go b/builtin/credential/aws/path_tidy_roletag_denylist.go index ddd1f7944d573..80c9dd8afea72 100644 --- a/builtin/credential/aws/path_tidy_roletag_denylist.go +++ b/builtin/credential/aws/path_tidy_roletag_denylist.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package awsauth import ( @@ -22,13 +19,6 @@ const ( func (b *backend) pathTidyRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "tidy/roletag-denylist$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "role-tag-deny-list", - OperationVerb: "tidy", - }, - Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, diff --git a/builtin/credential/aws/pkcs7/LICENSE b/builtin/credential/aws/pkcs7/LICENSE deleted file mode 100644 index f2fc6efbfcdc6..0000000000000 --- a/builtin/credential/aws/pkcs7/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Andrew Smith - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following 
conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/builtin/credential/cert/backend.go b/builtin/credential/cert/backend.go index 61dd88988e361..542735dfd4d88 100644 --- a/builtin/credential/cert/backend.go +++ b/builtin/credential/cert/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cert import ( @@ -11,23 +8,21 @@ import ( "net/http" "strings" "sync" - "sync/atomic" "time" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/ocsp" "github.com/hashicorp/vault/sdk/logical" ) -const operationPrefixCert = "cert" - func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { return nil, err } + if err := b.lockThenpopulateCRLs(ctx, conf.StorageView); err != nil { + return nil, err + } return b, nil } @@ -45,17 +40,16 @@ func Backend() *backend { pathLogin(&b), pathListCerts(&b), pathCerts(&b), - pathListCRLs(&b), pathCRLs(&b), }, - AuthRenew: b.loginPathWrapper(b.pathLoginRenew), - Invalidate: b.invalidate, - BackendType: logical.TypeCredential, - InitializeFunc: b.initialize, - PeriodicFunc: b.updateCRLs, + AuthRenew: b.loginPathWrapper(b.pathLoginRenew), + Invalidate: b.invalidate, + BackendType: logical.TypeCredential, + 
PeriodicFunc: b.updateCRLs, } b.crlUpdateMutex = &sync.RWMutex{} + return &b } @@ -63,30 +57,8 @@ type backend struct { *framework.Backend MapCertId *framework.PathMap - crls map[string]CRLInfo - crlUpdateMutex *sync.RWMutex - ocspClientMutex sync.RWMutex - ocspClient *ocsp.Client - configUpdated atomic.Bool -} - -func (b *backend) initialize(ctx context.Context, req *logical.InitializationRequest) error { - bConf, err := b.Config(ctx, req.Storage) - if err != nil { - b.Logger().Error(fmt.Sprintf("failed to load backend configuration: %v", err)) - return err - } - - if bConf != nil { - b.updatedConfig(bConf) - } - - if err := b.lockThenpopulateCRLs(ctx, req.Storage); err != nil { - b.Logger().Error(fmt.Sprintf("failed to populate CRLs: %v", err)) - return err - } - - return nil + crls map[string]CRLInfo + crlUpdateMutex *sync.RWMutex } func (b *backend) invalidate(_ context.Context, key string) { @@ -95,25 +67,9 @@ func (b *backend) invalidate(_ context.Context, key string) { b.crlUpdateMutex.Lock() defer b.crlUpdateMutex.Unlock() b.crls = nil - case key == "config": - b.configUpdated.Store(true) } } -func (b *backend) initOCSPClient(cacheSize int) { - b.ocspClient = ocsp.New(func() hclog.Logger { - return b.Logger() - }, cacheSize) -} - -func (b *backend) updatedConfig(config *config) { - b.ocspClientMutex.Lock() - defer b.ocspClientMutex.Unlock() - b.initOCSPClient(config.OcspCacheSize) - b.configUpdated.Store(false) - return -} - func (b *backend) fetchCRL(ctx context.Context, storage logical.Storage, name string, crl *CRLInfo) error { response, err := http.Get(crl.CDP.Url) if err != nil { @@ -148,19 +104,6 @@ func (b *backend) updateCRLs(ctx context.Context, req *logical.Request) error { return errs.ErrorOrNil() } -func (b *backend) storeConfig(ctx context.Context, storage logical.Storage, config *config) error { - entry, err := logical.StorageEntryJSON("config", config) - if err != nil { - return err - } - - if err := storage.Put(ctx, entry); err != nil { - 
return err - } - b.updatedConfig(config) - return nil -} - const backendHelp = ` The "cert" credential provider allows authentication using TLS client certificates. A client connects to Vault and uses diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go index 47d7ae05d5442..4365df47716ae 100644 --- a/builtin/credential/cert/backend_test.go +++ b/builtin/credential/cert/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cert import ( @@ -448,7 +445,7 @@ func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) { } // Create a new api client with the desired TLS configuration - newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig()) + newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig) secret, err = newClient.Logical().Write("auth/cert/login", map[string]interface{}{ "name": "myvault-dot-com", @@ -598,7 +595,7 @@ path "kv/ext/{{identity.entity.aliases.%s.metadata.2-1-1-1}}" { } // Create a new api client with the desired TLS configuration - newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig()) + newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig) var secret *api.Secret @@ -928,21 +925,6 @@ func TestBackend_RegisteredNonCA_CRL(t *testing.T) { t.Fatalf("err:%v resp:%#v", err, resp) } - // Ensure the CRL shows up on a list. 
- listReq := &logical.Request{ - Operation: logical.ListOperation, - Storage: storage, - Path: "crls", - Data: map[string]interface{}{}, - } - resp, err = b.HandleRequest(context.Background(), listReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - if len(resp.Data) != 1 || len(resp.Data["keys"].([]string)) != 1 || resp.Data["keys"].([]string)[0] != "issuedcrl" { - t.Fatalf("bad listing: resp:%v", resp) - } - // Attempt login with the same connection state but with the CRL registered resp, err = b.HandleRequest(context.Background(), loginReq) if err != nil { @@ -1095,22 +1077,16 @@ func TestBackend_CRLs(t *testing.T) { } func testFactory(t *testing.T) logical.Backend { - storage := &logical.InmemStorage{} b, err := Factory(context.Background(), &logical.BackendConfig{ System: &logical.StaticSystemView{ DefaultLeaseTTLVal: 1000 * time.Second, MaxLeaseTTLVal: 1800 * time.Second, }, - StorageView: storage, + StorageView: &logical.InmemStorage{}, }) if err != nil { t.Fatalf("error: %s", err) } - if err := b.Initialize(context.Background(), &logical.InitializationRequest{ - Storage: storage, - }); err != nil { - t.Fatalf("error: %s", err) - } return b } @@ -1932,58 +1908,31 @@ type allowed struct { metadata_ext string // allowed metadata extensions to add to identity alias } -func testAccStepCert(t *testing.T, name string, cert []byte, policies string, testData allowed, expectError bool) logicaltest.TestStep { - return testAccStepCertWithExtraParams(t, name, cert, policies, testData, expectError, nil) -} - -func testAccStepCertWithExtraParams(t *testing.T, name string, cert []byte, policies string, testData allowed, expectError bool, extraParams map[string]interface{}) logicaltest.TestStep { - data := map[string]interface{}{ - "certificate": string(cert), - "policies": policies, - "display_name": name, - "allowed_names": testData.names, - "allowed_common_names": testData.common_names, - "allowed_dns_sans": 
testData.dns, - "allowed_email_sans": testData.emails, - "allowed_uri_sans": testData.uris, - "allowed_organizational_units": testData.organizational_units, - "required_extensions": testData.ext, - "allowed_metadata_extensions": testData.metadata_ext, - "lease": 1000, - } - for k, v := range extraParams { - data[k] = v - } +func testAccStepCert( + t *testing.T, name string, cert []byte, policies string, testData allowed, expectError bool, +) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "certs/" + name, ErrorOk: expectError, - Data: data, - Check: func(resp *logical.Response) error { - if resp == nil && expectError { - return fmt.Errorf("expected error but received nil") - } - return nil + Data: map[string]interface{}{ + "certificate": string(cert), + "policies": policies, + "display_name": name, + "allowed_names": testData.names, + "allowed_common_names": testData.common_names, + "allowed_dns_sans": testData.dns, + "allowed_email_sans": testData.emails, + "allowed_uri_sans": testData.uris, + "allowed_organizational_units": testData.organizational_units, + "required_extensions": testData.ext, + "allowed_metadata_extensions": testData.metadata_ext, + "lease": 1000, }, - } -} - -func testAccStepReadCertPolicy(t *testing.T, name string, expectError bool, expected map[string]interface{}) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "certs/" + name, - ErrorOk: expectError, - Data: nil, Check: func(resp *logical.Response) error { - if (resp == nil || len(resp.Data) == 0) && expectError { + if resp == nil && expectError { return fmt.Errorf("expected error but received nil") } - for key, expectedValue := range expected { - actualValue := resp.Data[key] - if expectedValue != actualValue { - return fmt.Errorf("Expected to get [%v]=[%v] but read [%v]=[%v] from server for certs/%v: %v", key, expectedValue, key, actualValue, name, resp) - } - } return nil }, } diff --git 
a/builtin/credential/cert/cli.go b/builtin/credential/cert/cli.go index 3ba1e712d1344..4a470c89616f8 100644 --- a/builtin/credential/cert/cli.go +++ b/builtin/credential/cert/cli.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cert import ( diff --git a/builtin/credential/cert/cmd/cert/main.go b/builtin/credential/cert/cmd/cert/main.go index 5b80a54cde778..09018ec3f040d 100644 --- a/builtin/credential/cert/cmd/cert/main.go +++ b/builtin/credential/cert/cmd/cert/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: cert.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/cert/path_certs.go b/builtin/credential/cert/path_certs.go index 03a3e5586210e..d303dec15d46c 100644 --- a/builtin/credential/cert/path_certs.go +++ b/builtin/credential/cert/path_certs.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cert import ( @@ -10,8 +7,7 @@ import ( "strings" "time" - "github.com/hashicorp/go-sockaddr" - + sockaddr "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" @@ -21,33 +17,22 @@ func pathListCerts(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/?", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixCert, - OperationSuffix: "certificates", - Navigation: true, - ItemType: "Certificate", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathCertList, }, HelpSynopsis: pathCertHelpSyn, HelpDescription: pathCertHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Navigation: true, + ItemType: "Certificate", + }, } } func pathCerts(b *backend) *framework.Path { p := &framework.Path{ Pattern: "certs/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixCert, - OperationSuffix: "certificate", - Action: "Create", - ItemType: "Certificate", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -62,35 +47,7 @@ Must be x509 PEM encoded.`, EditType: "file", }, }, - "ocsp_enabled": { - Type: framework.TypeBool, - Description: `Whether to attempt OCSP verification of certificates at login`, - }, - "ocsp_ca_certificates": { - Type: framework.TypeString, - Description: `Any additional CA certificates needed to communicate with OCSP servers`, - DisplayAttrs: &framework.DisplayAttributes{ - EditType: "file", - }, - }, - "ocsp_servers_override": { - Type: framework.TypeCommaStringSlice, - Description: `A comma-separated list of OCSP server addresses. 
If unset, the OCSP server is determined -from the AuthorityInformationAccess extension on the certificate being inspected.`, - DisplayAttrs: &framework.DisplayAttributes{ - Description: "A list of OCSP server addresses. If unset, the OCSP server is determined from the AuthorityInformationAccess extension on the certificate being inspected.", - }, - }, - "ocsp_fail_open": { - Type: framework.TypeBool, - Default: false, - Description: "If set to true, if an OCSP revocation cannot be made successfully, login will proceed rather than failing. If false, failing to get an OCSP status fails the request.", - }, - "ocsp_query_all_servers": { - Type: framework.TypeBool, - Default: false, - Description: "If set to true, rather than accepting the first successful OCSP response, query all servers and consider the certificate valid only if all servers agree.", - }, + "allowed_names": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of names. @@ -98,8 +55,7 @@ At least one must exist in either the Common Name or SANs. Supports globbing. This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans.`, DisplayAttrs: &framework.DisplayAttributes{ - Group: "Constraints", - Description: "A list of names. At least one must exist in either the Common Name or SANs. Supports globbing. This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans.", + Group: "Constraints", }, }, @@ -108,8 +64,7 @@ allowed_email_sans, allowed_uri_sans.`, Description: `A comma-separated list of names. At least one must exist in the Common Name. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Group: "Constraints", - Description: "A list of names. At least one must exist in the Common Name. Supports globbing.", + Group: "Constraints", }, }, @@ -118,9 +73,8 @@ At least one must exist in the Common Name. 
Supports globbing.`, Description: `A comma-separated list of DNS names. At least one must exist in the SANs. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Allowed DNS SANs", - Group: "Constraints", - Description: "A list of DNS names. At least one must exist in the SANs. Supports globbing.", + Name: "Allowed DNS SANs", + Group: "Constraints", }, }, @@ -129,9 +83,8 @@ At least one must exist in the SANs. Supports globbing.`, Description: `A comma-separated list of Email Addresses. At least one must exist in the SANs. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Allowed Email SANs", - Group: "Constraints", - Description: "A list of Email Addresses. At least one must exist in the SANs. Supports globbing.", + Name: "Allowed Email SANs", + Group: "Constraints", }, }, @@ -140,9 +93,8 @@ At least one must exist in the SANs. Supports globbing.`, Description: `A comma-separated list of URIs. At least one must exist in the SANs. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Allowed URI SANs", - Group: "Constraints", - Description: "A list of URIs. At least one must exist in the SANs. Supports globbing.", + Name: "Allowed URI SANs", + Group: "Constraints", }, }, @@ -151,8 +103,7 @@ At least one must exist in the SANs. Supports globbing.`, Description: `A comma-separated list of Organizational Units names. At least one must exist in the OU field.`, DisplayAttrs: &framework.DisplayAttributes{ - Group: "Constraints", - Description: "A list of Organizational Units names. At least one must exist in the OU field.", + Group: "Constraints", }, }, @@ -161,9 +112,6 @@ At least one must exist in the OU field.`, Description: `A comma-separated string or array of extensions formatted as "oid:value". Expects the extension value to be some type of ASN1 encoded string. All values much match. 
Supports globbing on "value".`, - DisplayAttrs: &framework.DisplayAttributes{ - Description: "A list of extensions formatted as 'oid:value'. Expects the extension value to be some type of ASN1 encoded string. All values much match. Supports globbing on 'value'.", - }, }, "allowed_metadata_extensions": { @@ -172,9 +120,6 @@ All values much match. Supports globbing on "value".`, Upon successful authentication, these extensions will be added as metadata if they are present in the certificate. The metadata key will be the string consisting of the oid numbers separated by a dash (-) instead of a dot (.) to allow usage in ACL templates.`, - DisplayAttrs: &framework.DisplayAttributes{ - Description: "A list of OID extensions. Upon successful authentication, these extensions will be added as metadata if they are present in the certificate. The metadata key will be the string consisting of the OID numbers separated by a dash (-) instead of a dot (.) to allow usage in ACL templates.", - }, }, "display_name": { @@ -228,6 +173,10 @@ certificate.`, HelpSynopsis: pathCertHelpSyn, HelpDescription: pathCertHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Create", + ItemType: "Certificate", + }, } tokenutil.AddTokenFields(p.Fields) @@ -303,11 +252,6 @@ func (b *backend) pathCertRead(ctx context.Context, req *logical.Request, d *fra "allowed_organizational_units": cert.AllowedOrganizationalUnits, "required_extensions": cert.RequiredExtensions, "allowed_metadata_extensions": cert.AllowedMetadataExtensions, - "ocsp_ca_certificates": cert.OcspCaCertificates, - "ocsp_enabled": cert.OcspEnabled, - "ocsp_servers_override": cert.OcspServersOverride, - "ocsp_fail_open": cert.OcspFailOpen, - "ocsp_query_all_servers": cert.OcspQueryAllServers, } cert.PopulateTokenData(data) @@ -350,21 +294,6 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr if certificateRaw, ok := d.GetOk("certificate"); ok { cert.Certificate = certificateRaw.(string) } - 
if ocspCertificatesRaw, ok := d.GetOk("ocsp_ca_certificates"); ok { - cert.OcspCaCertificates = ocspCertificatesRaw.(string) - } - if ocspEnabledRaw, ok := d.GetOk("ocsp_enabled"); ok { - cert.OcspEnabled = ocspEnabledRaw.(bool) - } - if ocspServerOverrides, ok := d.GetOk("ocsp_servers_override"); ok { - cert.OcspServersOverride = ocspServerOverrides.([]string) - } - if ocspFailOpen, ok := d.GetOk("ocsp_fail_open"); ok { - cert.OcspFailOpen = ocspFailOpen.(bool) - } - if ocspQueryAll, ok := d.GetOk("ocsp_query_all_servers"); ok { - cert.OcspQueryAllServers = ocspQueryAll.(bool) - } if displayNameRaw, ok := d.GetOk("display_name"); ok { cert.DisplayName = displayNameRaw.(string) } @@ -470,7 +399,7 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr } } if !clientAuth { - return logical.ErrorResponse("nonCA certificates should have TLS client authentication set as an extended key usage"), nil + return logical.ErrorResponse("non-CA certificates should have TLS client authentication set as an extended key usage"), nil } } @@ -509,12 +438,6 @@ type CertEntry struct { RequiredExtensions []string AllowedMetadataExtensions []string BoundCIDRs []*sockaddr.SockAddrMarshaler - - OcspCaCertificates string - OcspEnabled bool - OcspServersOverride []string - OcspFailOpen bool - OcspQueryAllServers bool } const pathCertHelpSyn = ` @@ -526,7 +449,6 @@ This endpoint allows you to create, read, update, and delete trusted certificate that are allowed to authenticate. Deleting a certificate will not revoke auth for prior authenticated connections. -To do this, do a revoke on "login". If you don'log need to revoke login immediately, +To do this, do a revoke on "login". If you don't need to revoke login immediately, then the next renew will cause the lease to expire. 
- ` diff --git a/builtin/credential/cert/path_config.go b/builtin/credential/cert/path_config.go index 6f1f290372355..9cc17f3a6aafe 100644 --- a/builtin/credential/cert/path_config.go +++ b/builtin/credential/cert/path_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cert import ( @@ -11,16 +8,9 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const maxCacheSize = 100000 - func pathConfig(b *backend) *framework.Path { return &framework.Path{ Pattern: "config", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixCert, - }, - Fields: map[string]*framework.FieldSchema{ "disable_binding": { Type: framework.TypeBool, @@ -32,50 +22,28 @@ func pathConfig(b *backend) *framework.Path { Default: false, Description: `If set, metadata of the certificate including the metadata corresponding to allowed_metadata_extensions will be stored in the alias. Defaults to false.`, }, - "ocsp_cache_size": { - Type: framework.TypeInt, - Default: 100, - Description: `The size of the in memory OCSP response cache, shared by all configured certs`, - }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - }, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "configuration", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConfigWrite, + logical.ReadOperation: b.pathConfigRead, }, } } func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - config, err := b.Config(ctx, req.Storage) + disableBinding := data.Get("disable_binding").(bool) + enableIdentityAliasMetadata := 
data.Get("enable_identity_alias_metadata").(bool) + + entry, err := logical.StorageEntryJSON("config", config{ + DisableBinding: disableBinding, + EnableIdentityAliasMetadata: enableIdentityAliasMetadata, + }) if err != nil { return nil, err } - if disableBindingRaw, ok := data.GetOk("disable_binding"); ok { - config.DisableBinding = disableBindingRaw.(bool) - } - if enableIdentityAliasMetadataRaw, ok := data.GetOk("enable_identity_alias_metadata"); ok { - config.EnableIdentityAliasMetadata = enableIdentityAliasMetadataRaw.(bool) - } - if cacheSizeRaw, ok := data.GetOk("ocsp_cache_size"); ok { - cacheSize := cacheSizeRaw.(int) - if cacheSize < 2 || cacheSize > maxCacheSize { - return logical.ErrorResponse("invalid cache size, must be >= 2 and <= %d", maxCacheSize), nil - } - config.OcspCacheSize = cacheSize - } - if err := b.storeConfig(ctx, req.Storage, config); err != nil { + if err := req.Storage.Put(ctx, entry); err != nil { return nil, err } return nil, nil @@ -90,7 +58,6 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *f data := map[string]interface{}{ "disable_binding": cfg.DisableBinding, "enable_identity_alias_metadata": cfg.EnableIdentityAliasMetadata, - "ocsp_cache_size": cfg.OcspCacheSize, } return &logical.Response{ @@ -118,5 +85,4 @@ func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error type config struct { DisableBinding bool `json:"disable_binding"` EnableIdentityAliasMetadata bool `json:"enable_identity_alias_metadata"` - OcspCacheSize int `json:"ocsp_cache_size"` } diff --git a/builtin/credential/cert/path_crls.go b/builtin/credential/cert/path_crls.go index 9dd710758cef1..afb00470676b8 100644 --- a/builtin/credential/cert/path_crls.go +++ b/builtin/credential/cert/path_crls.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cert import ( @@ -19,41 +16,9 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -func pathListCRLs(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "crls/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixCert, - OperationSuffix: "crls", - }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ListOperation: &framework.PathOperation{ - Callback: b.pathCRLsList, - }, - }, - HelpSynopsis: pathCRLsHelpSyn, - HelpDescription: pathCRLsHelpDesc, - } -} - -func (b *backend) pathCRLsList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - entries, err := req.Storage.List(ctx, "crls/") - if err != nil { - return nil, fmt.Errorf("failed to list CRLs: %w", err) - } - - return logical.ListResponse(entries), nil -} - func pathCRLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "crls/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixCert, - OperationSuffix: "crl", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -333,7 +298,7 @@ Manage Certificate Revocation Lists checked during authentication. ` const pathCRLsHelpDesc = ` -This endpoint allows you to list, create, read, update, and delete the Certificate +This endpoint allows you to create, read, update, and delete the Certificate Revocation Lists checked during authentication, and/or CRL Distribution Point URLs. diff --git a/builtin/credential/cert/path_crls_test.go b/builtin/credential/cert/path_crls_test.go index 24211f5cad5bb..7cb4bda83696c 100644 --- a/builtin/credential/cert/path_crls_test.go +++ b/builtin/credential/cert/path_crls_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cert import ( @@ -18,11 +15,11 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" ) @@ -194,7 +191,7 @@ func TestCRLFetch(t *testing.T) { // Give ourselves a little extra room on slower CI systems to ensure we // can fetch the new CRL. - corehelpers.RetryUntil(t, 2*time.Second, func() error { + vault.RetryUntil(t, 2*time.Second, func() error { b.crlUpdateMutex.Lock() defer b.crlUpdateMutex.Unlock() diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go index d59c5b4a9195d..383f99a0cd7e4 100644 --- a/builtin/credential/cert/path_login.go +++ b/builtin/credential/cert/path_login.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cert import ( @@ -17,13 +14,10 @@ import ( "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/helper/cidrutil" - "github.com/hashicorp/vault/sdk/helper/ocsp" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/sdk/helper/cidrutil" glob "github.com/ryanuber/go-glob" ) @@ -36,10 +30,6 @@ type ParsedCert struct { func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixCert, - OperationVerb: "login", - }, Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -84,9 +74,6 @@ func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request } func (b *backend) pathLoginAliasLookahead(ctx context.Context, 
req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - if req.Connection == nil || req.Connection.ConnState == nil { - return nil, fmt.Errorf("tls connection not found") - } clientCerts := req.Connection.ConnState.PeerCertificates if len(clientCerts) == 0 { return nil, fmt.Errorf("no client certificate found") @@ -106,9 +93,6 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *fra if err != nil { return nil, err } - if b.configUpdated.Load() { - b.updatedConfig(config) - } var matched *ParsedCert if verifyResp, resp, err := b.verifyCredentials(ctx, req, data); err != nil { @@ -182,9 +166,6 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f if err != nil { return nil, err } - if b.configUpdated.Load() { - b.updatedConfig(config) - } if !config.DisableBinding { var matched *ParsedCert @@ -255,8 +236,8 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d certName = d.Get("name").(string) } - // Load the trusted certificates and other details - roots, trusted, trustedNonCAs, verifyConf := b.loadTrustedCerts(ctx, req.Storage, certName) + // Load the trusted certificates + roots, trusted, trustedNonCAs := b.loadTrustedCerts(ctx, req.Storage, certName) // Get the list of full chains matching the connection and validates the // certificate itself @@ -265,37 +246,16 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d return nil, nil, err } - var extraCas []*x509.Certificate - for _, t := range trusted { - extraCas = append(extraCas, t.Certificates...) - } - // If trustedNonCAs is not empty it means that client had registered a non-CA cert // with the backend. 
- var retErr error if len(trustedNonCAs) != 0 { for _, trustedNonCA := range trustedNonCAs { tCert := trustedNonCA.Certificates[0] // Check for client cert being explicitly listed in the config (and matching other constraints) if tCert.SerialNumber.Cmp(clientCert.SerialNumber) == 0 && - bytes.Equal(tCert.AuthorityKeyId, clientCert.AuthorityKeyId) { - matches, err := b.matchesConstraints(ctx, clientCert, trustedNonCA.Certificates, trustedNonCA, verifyConf) - - // matchesConstraints returns an error when OCSP verification fails, - // but some other path might still give us success. Add to the - // retErr multierror, but avoid duplicates. This way, if we reach a - // failure later, we can give additional context. - // - // XXX: If matchesConstraints is updated to generate additional, - // immediately fatal errors, we likely need to extend it to return - // another boolean (fatality) or other detection scheme. - if err != nil && (retErr == nil || !errwrap.Contains(retErr, err.Error())) { - retErr = multierror.Append(retErr, err) - } - - if matches { - return trustedNonCA, nil, nil - } + bytes.Equal(tCert.AuthorityKeyId, clientCert.AuthorityKeyId) && + b.matchesConstraints(clientCert, trustedNonCA.Certificates, trustedNonCA) { + return trustedNonCA, nil, nil } } } @@ -303,54 +263,36 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d // If no trusted chain was found, client is not authenticated // This check happens after checking for a matching configured non-CA certs if len(trustedChains) == 0 { - if retErr == nil { - return nil, logical.ErrorResponse(fmt.Sprintf("invalid certificate or no client certificate supplied; additionally got errors during verification: %v", retErr)), nil - } return nil, logical.ErrorResponse("invalid certificate or no client certificate supplied"), nil } // Search for a ParsedCert that intersects with the validated chains and any additional constraints + matches := make([]*ParsedCert, 0) for _, trust := range 
trusted { // For each ParsedCert in the config for _, tCert := range trust.Certificates { // For each certificate in the entry for _, chain := range trustedChains { // For each root chain that we matched for _, cCert := range chain { // For each cert in the matched chain - if tCert.Equal(cCert) { // ParsedCert intersects with matched chain - match, err := b.matchesConstraints(ctx, clientCert, chain, trust, verifyConf) // validate client cert + matched chain against the config - - // See note above. - if err != nil && (retErr == nil || !errwrap.Contains(retErr, err.Error())) { - retErr = multierror.Append(retErr, err) - } - - // Return the first matching entry (for backwards - // compatibility, we continue to just pick the first - // one if we have multiple matches). - // - // Here, we return directly: this means that any - // future OCSP errors would be ignored; in the future, - // if these become fatal, we could revisit this - // choice and choose the first match after evaluating - // all possible candidates. 
- if match && err == nil { - return trust, nil, nil - } + if tCert.Equal(cCert) && // ParsedCert intersects with matched chain + b.matchesConstraints(clientCert, chain, trust) { // validate client cert + matched chain against the config + // Add the match to the list + matches = append(matches, trust) } } } } } - if retErr != nil { - return nil, logical.ErrorResponse(fmt.Sprintf("no chain matching all constraints could be found for this login certificate; additionally got errors during verification: %v", retErr)), nil + // Fail on no matches + if len(matches) == 0 { + return nil, logical.ErrorResponse("no chain matching all constraints could be found for this login certificate"), nil } - return nil, logical.ErrorResponse("no chain matching all constraints could be found for this login certificate"), nil + // Return the first matching entry (for backwards compatibility, we continue to just pick one if multiple match) + return matches[0], nil, nil } -func (b *backend) matchesConstraints(ctx context.Context, clientCert *x509.Certificate, trustedChain []*x509.Certificate, - config *ParsedCert, conf *ocsp.VerifyConfig, -) (bool, error) { - soFar := !b.checkForChainInCRLs(trustedChain) && +func (b *backend) matchesConstraints(clientCert *x509.Certificate, trustedChain []*x509.Certificate, config *ParsedCert) bool { + return !b.checkForChainInCRLs(trustedChain) && b.matchesNames(clientCert, config) && b.matchesCommonName(clientCert, config) && b.matchesDNSSANs(clientCert, config) && @@ -358,14 +300,6 @@ func (b *backend) matchesConstraints(ctx context.Context, clientCert *x509.Certi b.matchesURISANs(clientCert, config) && b.matchesOrganizationalUnits(clientCert, config) && b.matchesCertificateExtensions(clientCert, config) - if config.Entry.OcspEnabled { - ocspGood, err := b.checkForCertInOCSP(ctx, clientCert, trustedChain, conf) - if err != nil { - return false, err - } - soFar = soFar && ocspGood - } - return soFar, nil } // matchesNames verifies that the certificate 
matches at least one configured @@ -512,7 +446,7 @@ func (b *backend) matchesCertificateExtensions(clientCert *x509.Certificate, con asn1.Unmarshal(ext.Value, &parsedValue) clientExtMap[ext.Id.String()] = parsedValue } - // If any of the required extensions don'log match the constraint fails + // If any of the required extensions don't match the constraint fails for _, requiredExt := range config.Entry.RequiredExtensions { reqExt := strings.SplitN(requiredExt, ":", 2) clientExtValue, clientExtValueOk := clientExtMap[reqExt[0]] @@ -556,7 +490,7 @@ func (b *backend) certificateExtensionsMetadata(clientCert *x509.Certificate, co } // loadTrustedCerts is used to load all the trusted certificates from the backend -func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, certName string) (pool *x509.CertPool, trusted []*ParsedCert, trustedNonCAs []*ParsedCert, conf *ocsp.VerifyConfig) { +func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, certName string) (pool *x509.CertPool, trusted []*ParsedCert, trustedNonCAs []*ParsedCert) { pool = x509.NewCertPool() trusted = make([]*ParsedCert, 0) trustedNonCAs = make([]*ParsedCert, 0) @@ -573,7 +507,6 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, } } - conf = &ocsp.VerifyConfig{} for _, name := range names { entry, err := b.Cert(ctx, storage, strings.TrimPrefix(name, "cert/")) if err != nil { @@ -581,7 +514,7 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, continue } if entry == nil { - // This could happen when the certName was provided and the cert doesn'log exist, + // This could happen when the certName was provided and the cert doesn't exist, // or just if between the LIST and the GET the cert was deleted. 
continue } @@ -591,8 +524,6 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, b.Logger().Error("failed to parse certificate", "name", name) continue } - parsed = append(parsed, parsePEM([]byte(entry.OcspCaCertificates))...) - if !parsed[0].IsCA { trustedNonCAs = append(trustedNonCAs, &ParsedCert{ Entry: entry, @@ -609,39 +540,10 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, Certificates: parsed, }) } - if entry.OcspEnabled { - conf.OcspEnabled = true - conf.OcspServersOverride = append(conf.OcspServersOverride, entry.OcspServersOverride...) - if entry.OcspFailOpen { - conf.OcspFailureMode = ocsp.FailOpenTrue - } else { - conf.OcspFailureMode = ocsp.FailOpenFalse - } - conf.QueryAllServers = conf.QueryAllServers || entry.OcspQueryAllServers - } } return } -func (b *backend) checkForCertInOCSP(ctx context.Context, clientCert *x509.Certificate, chain []*x509.Certificate, conf *ocsp.VerifyConfig) (bool, error) { - if !conf.OcspEnabled || len(chain) < 2 { - return true, nil - } - b.ocspClientMutex.RLock() - defer b.ocspClientMutex.RUnlock() - err := b.ocspClient.VerifyLeafCertificate(ctx, clientCert, chain[1], conf) - if err != nil { - // We want to preserve error messages when they have additional, - // potentially useful information. Just having a revoked cert - // isn't additionally useful. - if !strings.Contains(err.Error(), "has been revoked") { - return false, err - } - return false, nil - } - return true, nil -} - func (b *backend) checkForChainInCRLs(chain []*x509.Certificate) bool { badChain := false for _, cert := range chain { diff --git a/builtin/credential/cert/path_login_test.go b/builtin/credential/cert/path_login_test.go index d86bd31bd92eb..a01ec981663f3 100644 --- a/builtin/credential/cert/path_login_test.go +++ b/builtin/credential/cert/path_login_test.go @@ -1,64 +1,24 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cert import ( - "context" "crypto/tls" "crypto/x509" "crypto/x509/pkix" - "fmt" "io/ioutil" "math/big" mathrand "math/rand" "net" - "net/http" "os" "path/filepath" "strings" "testing" "time" - "github.com/hashicorp/vault/sdk/helper/certutil" - - "golang.org/x/crypto/ocsp" - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" "github.com/hashicorp/vault/sdk/logical" ) -var ocspPort int - -var source InMemorySource - -type testLogger struct{} - -func (t *testLogger) Log(args ...any) { - fmt.Printf("%v", args) -} - -func TestMain(m *testing.M) { - source = make(InMemorySource) - - listener, err := net.Listen("tcp", ":0") - if err != nil { - return - } - - ocspPort = listener.Addr().(*net.TCPAddr).Port - srv := &http.Server{ - Addr: "localhost:0", - Handler: NewResponder(&testLogger{}, source, nil), - } - go func() { - srv.Serve(listener) - }() - defer srv.Shutdown(context.Background()) - m.Run() -} - func TestCert_RoleResolve(t *testing.T) { certTemplate := &x509.Certificate{ Subject: pkix.Name{ @@ -199,34 +159,6 @@ func testAccStepResolveRoleExpectRoleResolutionToFail(t *testing.T, connState tl } } -func testAccStepResolveRoleOCSPFail(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ResolveRoleOperation, - Path: "login", - Unauthenticated: true, - ConnState: &connState, - ErrorOk: true, - Check: func(resp *logical.Response) error { - if resp == nil || !resp.IsError() { - t.Fatalf("Response was not an error: resp:%#v", resp) - } - - errString, ok := resp.Data["error"].(string) - if !ok { - t.Fatal("Error not part of response.") - } - - if !strings.Contains(errString, "no chain matching") { - t.Fatalf("Error was not due to OCSP failure. 
Error: %s", errString) - } - return nil - }, - Data: map[string]interface{}{ - "name": certName, - }, - } -} - func TestCert_RoleResolve_RoleDoesNotExist(t *testing.T) { certTemplate := &x509.Certificate{ Subject: pkix.Name{ @@ -265,98 +197,3 @@ func TestCert_RoleResolve_RoleDoesNotExist(t *testing.T) { }, }) } - -func TestCert_RoleResolveOCSP(t *testing.T) { - cases := []struct { - name string - failOpen bool - certStatus int - errExpected bool - }{ - {"failFalseGoodCert", false, ocsp.Good, false}, - {"failFalseRevokedCert", false, ocsp.Revoked, true}, - {"failFalseUnknownCert", false, ocsp.Unknown, true}, - {"failTrueGoodCert", true, ocsp.Good, false}, - {"failTrueRevokedCert", true, ocsp.Revoked, true}, - {"failTrueUnknownCert", true, ocsp.Unknown, false}, - } - certTemplate := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "example.com", - }, - DNSNames: []string{"example.com"}, - IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, - SerialNumber: big.NewInt(mathrand.Int63()), - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(262980 * time.Hour), - OCSPServer: []string{fmt.Sprintf("http://localhost:%d", ocspPort)}, - } - tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) - if tempDir != "" { - defer os.RemoveAll(tempDir) - } - if err != nil { - t.Fatalf("error testing connection state: %v", err) - } - ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) - if err != nil { - t.Fatalf("err: %v", err) - } - - issuer := parsePEM(ca) - pkf, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_key.pem")) - if err != nil { - t.Fatalf("err: %v", err) - } - pk, err := certutil.ParsePEMBundle(string(pkf)) - if err != nil { - t.Fatalf("err: %v", err) - } - - for _, c := range cases { - t.Run(c.name, func(t 
*testing.T) { - resp, err := ocsp.CreateResponse(issuer[0], issuer[0], ocsp.Response{ - Status: c.certStatus, - SerialNumber: certTemplate.SerialNumber, - ProducedAt: time.Now(), - ThisUpdate: time.Now(), - NextUpdate: time.Now().Add(time.Hour), - }, pk.PrivateKey) - if err != nil { - t.Fatal(err) - } - source[certTemplate.SerialNumber.String()] = resp - - b := testFactory(t) - b.(*backend).ocspClient.ClearCache() - var resolveStep logicaltest.TestStep - var loginStep logicaltest.TestStep - if c.errExpected { - loginStep = testAccStepLoginWithNameInvalid(t, connState, "web") - resolveStep = testAccStepResolveRoleOCSPFail(t, connState, "web") - } else { - loginStep = testAccStepLoginWithName(t, connState, "web") - resolveStep = testAccStepResolveRoleWithName(t, connState, "web") - } - logicaltest.Test(t, logicaltest.TestCase{ - CredentialBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepCertWithExtraParams(t, "web", ca, "foo", allowed{dns: "example.com"}, false, - map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen}), - testAccStepReadCertPolicy(t, "web", false, map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen}), - loginStep, - resolveStep, - }, - }) - }) - } -} - -func serialFromBigInt(serial *big.Int) string { - return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) -} diff --git a/builtin/credential/cert/test_responder.go b/builtin/credential/cert/test_responder.go deleted file mode 100644 index d68ebe080e083..0000000000000 --- a/builtin/credential/cert/test_responder.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Package ocsp implements an OCSP responder based on a generic storage backend. -// It provides a couple of sample implementations. -// Because OCSP responders handle high query volumes, we have to be careful -// about how much logging we do. 
Error-level logs are reserved for problems -// internal to the server, that can be fixed by an administrator. Any type of -// incorrect input from a user should be logged and Info or below. For things -// that are logged on every request, Debug is the appropriate level. -// -// From https://github.com/cloudflare/cfssl/blob/master/ocsp/responder.go - -package cert - -import ( - "crypto" - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "time" - - "golang.org/x/crypto/ocsp" -) - -var ( - malformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} - internalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} - tryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} - sigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} - unauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} - - // ErrNotFound indicates the request OCSP response was not found. It is used to - // indicate that the responder should reply with unauthorizedErrorResponse. - ErrNotFound = errors.New("Request OCSP Response not found") -) - -// Source represents the logical source of OCSP responses, i.e., -// the logic that actually chooses a response based on a request. In -// order to create an actual responder, wrap one of these in a Responder -// object and pass it to http.Handle. By default the Responder will set -// the headers Cache-Control to "max-age=(response.NextUpdate-now), public, no-transform, must-revalidate", -// Last-Modified to response.ThisUpdate, Expires to response.NextUpdate, -// ETag to the SHA256 hash of the response, and Content-Type to -// application/ocsp-response. If you want to override these headers, -// or set extra headers, your source should return a http.Header -// with the headers you wish to set. If you don'log want to set any -// extra headers you may return nil instead. 
-type Source interface { - Response(*ocsp.Request) ([]byte, http.Header, error) -} - -// An InMemorySource is a map from serialNumber -> der(response) -type InMemorySource map[string][]byte - -// Response looks up an OCSP response to provide for a given request. -// InMemorySource looks up a response purely based on serial number, -// without regard to what issuer the request is asking for. -func (src InMemorySource) Response(request *ocsp.Request) ([]byte, http.Header, error) { - response, present := src[request.SerialNumber.String()] - if !present { - return nil, nil, ErrNotFound - } - return response, nil, nil -} - -// Stats is a basic interface that allows users to record information -// about returned responses -type Stats interface { - ResponseStatus(ocsp.ResponseStatus) -} - -type logger interface { - Log(args ...any) -} - -// A Responder object provides the HTTP logic to expose a -// Source of OCSP responses. -type Responder struct { - log logger - Source Source - stats Stats -} - -// NewResponder instantiates a Responder with the give Source. -func NewResponder(t logger, source Source, stats Stats) *Responder { - return &Responder{ - Source: source, - stats: stats, - log: t, - } -} - -func overrideHeaders(response http.ResponseWriter, headers http.Header) { - for k, v := range headers { - if len(v) == 1 { - response.Header().Set(k, v[0]) - } else if len(v) > 1 { - response.Header().Del(k) - for _, e := range v { - response.Header().Add(k, e) - } - } - } -} - -// hashToString contains mappings for the only hash functions -// x/crypto/ocsp supports -var hashToString = map[crypto.Hash]string{ - crypto.SHA1: "SHA1", - crypto.SHA256: "SHA256", - crypto.SHA384: "SHA384", - crypto.SHA512: "SHA512", -} - -// A Responder can process both GET and POST requests. The mapping -// from an OCSP request to an OCSP response is done by the Source; -// the Responder simply decodes the request, and passes back whatever -// response is provided by the source. 
-// Note: The caller must use http.StripPrefix to strip any path components -// (including '/') on GET requests. -// Do not use this responder in conjunction with http.NewServeMux, because the -// default handler will try to canonicalize path components by changing any -// strings of repeated '/' into a single '/', which will break the base64 -// encoding. -func (rs *Responder) ServeHTTP(response http.ResponseWriter, request *http.Request) { - // By default we set a 'max-age=0, no-cache' Cache-Control header, this - // is only returned to the client if a valid authorized OCSP response - // is not found or an error is returned. If a response if found the header - // will be altered to contain the proper max-age and modifiers. - response.Header().Add("Cache-Control", "max-age=0, no-cache") - // Read response from request - var requestBody []byte - var err error - switch request.Method { - case "GET": - base64Request, err := url.QueryUnescape(request.URL.Path) - if err != nil { - rs.log.Log("Error decoding URL:", request.URL.Path) - response.WriteHeader(http.StatusBadRequest) - return - } - // url.QueryUnescape not only unescapes %2B escaping, but it additionally - // turns the resulting '+' into a space, which makes base64 decoding fail. - // So we go back afterwards and turn ' ' back into '+'. This means we - // accept some malformed input that includes ' ' or %20, but that's fine. - base64RequestBytes := []byte(base64Request) - for i := range base64RequestBytes { - if base64RequestBytes[i] == ' ' { - base64RequestBytes[i] = '+' - } - } - // In certain situations a UA may construct a request that has a double - // slash between the host name and the base64 request body due to naively - // constructing the request URL. In that case strip the leading slash - // so that we can still decode the request. 
- if len(base64RequestBytes) > 0 && base64RequestBytes[0] == '/' { - base64RequestBytes = base64RequestBytes[1:] - } - requestBody, err = base64.StdEncoding.DecodeString(string(base64RequestBytes)) - if err != nil { - rs.log.Log("Error decoding base64 from URL", string(base64RequestBytes)) - response.WriteHeader(http.StatusBadRequest) - return - } - case "POST": - requestBody, err = ioutil.ReadAll(request.Body) - if err != nil { - rs.log.Log("Problem reading body of POST", err) - response.WriteHeader(http.StatusBadRequest) - return - } - default: - response.WriteHeader(http.StatusMethodNotAllowed) - return - } - b64Body := base64.StdEncoding.EncodeToString(requestBody) - rs.log.Log("Received OCSP request", b64Body) - - // All responses after this point will be OCSP. - // We could check for the content type of the request, but that - // seems unnecessariliy restrictive. - response.Header().Add("Content-Type", "application/ocsp-response") - - // Parse response as an OCSP request - // XXX: This fails if the request contains the nonce extension. - // We don'log intend to support nonces anyway, but maybe we - // should return unauthorizedRequest instead of malformed. 
- ocspRequest, err := ocsp.ParseRequest(requestBody) - if err != nil { - rs.log.Log("Error decoding request body", b64Body) - response.WriteHeader(http.StatusBadRequest) - response.Write(malformedRequestErrorResponse) - if rs.stats != nil { - rs.stats.ResponseStatus(ocsp.Malformed) - } - return - } - - // Look up OCSP response from source - ocspResponse, headers, err := rs.Source.Response(ocspRequest) - if err != nil { - if err == ErrNotFound { - rs.log.Log("No response found for request: serial %x, request body %s", - ocspRequest.SerialNumber, b64Body) - response.Write(unauthorizedErrorResponse) - if rs.stats != nil { - rs.stats.ResponseStatus(ocsp.Unauthorized) - } - return - } - rs.log.Log("Error retrieving response for request: serial %x, request body %s, error", - ocspRequest.SerialNumber, b64Body, err) - response.WriteHeader(http.StatusInternalServerError) - response.Write(internalErrorErrorResponse) - if rs.stats != nil { - rs.stats.ResponseStatus(ocsp.InternalError) - } - return - } - - parsedResponse, err := ocsp.ParseResponse(ocspResponse, nil) - if err != nil { - rs.log.Log("Error parsing response for serial %x", - ocspRequest.SerialNumber, err) - response.Write(internalErrorErrorResponse) - if rs.stats != nil { - rs.stats.ResponseStatus(ocsp.InternalError) - } - return - } - - // Write OCSP response to response - response.Header().Add("Last-Modified", parsedResponse.ThisUpdate.Format(time.RFC1123)) - response.Header().Add("Expires", parsedResponse.NextUpdate.Format(time.RFC1123)) - now := time.Now() - maxAge := 0 - if now.Before(parsedResponse.NextUpdate) { - maxAge = int(parsedResponse.NextUpdate.Sub(now) / time.Second) - } else { - // TODO(#530): we want max-age=0 but this is technically an authorized OCSP response - // (despite being stale) and 5019 forbids attaching no-cache - maxAge = 0 - } - response.Header().Set( - "Cache-Control", - fmt.Sprintf( - "max-age=%d, public, no-transform, must-revalidate", - maxAge, - ), - ) - responseHash := 
sha256.Sum256(ocspResponse) - response.Header().Add("ETag", fmt.Sprintf("\"%X\"", responseHash)) - - if headers != nil { - overrideHeaders(response, headers) - } - - // RFC 7232 says that a 304 response must contain the above - // headers if they would also be sent for a 200 for the same - // request, so we have to wait until here to do this - if etag := request.Header.Get("If-None-Match"); etag != "" { - if etag == fmt.Sprintf("\"%X\"", responseHash) { - response.WriteHeader(http.StatusNotModified) - return - } - } - response.WriteHeader(http.StatusOK) - response.Write(ocspResponse) - if rs.stats != nil { - rs.stats.ResponseStatus(ocsp.Success) - } -} - -/* -Copyright (c) 2014 CloudFlare Inc. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ diff --git a/builtin/credential/github/backend.go b/builtin/credential/github/backend.go index f8bbcc403c61d..89ce37c7cd6d0 100644 --- a/builtin/credential/github/backend.go +++ b/builtin/credential/github/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package github import ( @@ -14,8 +11,6 @@ import ( "golang.org/x/oauth2" ) -const operationPrefixGithub = "github" - func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { @@ -33,17 +28,6 @@ func Backend() *backend { DefaultKey: "default", } - teamMapPaths := b.TeamMap.Paths() - - teamMapPaths[0].DisplayAttrs = &framework.DisplayAttributes{ - OperationPrefix: operationPrefixGithub, - OperationSuffix: "teams", - } - teamMapPaths[1].DisplayAttrs = &framework.DisplayAttributes{ - OperationPrefix: operationPrefixGithub, - OperationSuffix: "team-mapping", - } - b.UserMap = &framework.PolicyMap{ PathMap: framework.PathMap{ Name: "users", @@ -51,18 +35,7 @@ func Backend() *backend { DefaultKey: "default", } - userMapPaths := b.UserMap.Paths() - - userMapPaths[0].DisplayAttrs = &framework.DisplayAttributes{ - OperationPrefix: operationPrefixGithub, - OperationSuffix: "users", - } - userMapPaths[1].DisplayAttrs = &framework.DisplayAttributes{ - OperationPrefix: operationPrefixGithub, - OperationSuffix: "user-mapping", - } - - allPaths := append(teamMapPaths, userMapPaths...) + allPaths := append(b.TeamMap.Paths(), b.UserMap.Paths()...) b.Backend = &framework.Backend{ Help: backendHelp, diff --git a/builtin/credential/github/backend_test.go b/builtin/credential/github/backend_test.go index 6ea08ee58134e..f3360f52cfb57 100644 --- a/builtin/credential/github/backend_test.go +++ b/builtin/credential/github/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package github import ( diff --git a/builtin/credential/github/cli.go b/builtin/credential/github/cli.go index d40f1b56d9e7e..bccc6fa516e2d 100644 --- a/builtin/credential/github/cli.go +++ b/builtin/credential/github/cli.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package github import ( diff --git a/builtin/credential/github/cmd/github/main.go b/builtin/credential/github/cmd/github/main.go index 499469a0f6813..be4fbb64ca65f 100644 --- a/builtin/credential/github/cmd/github/main.go +++ b/builtin/credential/github/cmd/github/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: github.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/github/path_config.go b/builtin/credential/github/path_config.go index 83238f32c49b4..707115c567e33 100644 --- a/builtin/credential/github/path_config.go +++ b/builtin/credential/github/path_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package github import ( @@ -20,11 +17,6 @@ import ( func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: "config", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixGithub, - }, - Fields: map[string]*framework.FieldSchema{ "organization": { Type: framework.TypeString, @@ -57,20 +49,9 @@ API-compatible authentication server.`, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixGithub, - OperationVerb: "configure", - }, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "configuration", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConfigWrite, + logical.ReadOperation: b.pathConfigRead, }, } diff --git a/builtin/credential/github/path_config_test.go b/builtin/credential/github/path_config_test.go index 2f592b21f7b8c..d59599f326209 100644 --- a/builtin/credential/github/path_config_test.go +++ b/builtin/credential/github/path_config_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package github import ( diff --git a/builtin/credential/github/path_login.go b/builtin/credential/github/path_login.go index 4ee94e5d5c670..252b5641cd4fe 100644 --- a/builtin/credential/github/path_login.go +++ b/builtin/credential/github/path_login.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package github import ( @@ -19,12 +16,6 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixGithub, - OperationVerb: "login", - }, - Fields: map[string]*framework.FieldSchema{ "token": { Type: framework.TypeString, diff --git a/builtin/credential/github/path_login_test.go b/builtin/credential/github/path_login_test.go index 282e3fa9401df..25baf7f811e8a 100644 --- a/builtin/credential/github/path_login_test.go +++ b/builtin/credential/github/path_login_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package github import ( diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index ffc1d1ee8c39f..d318fe4b4c6bb 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ldap import ( @@ -14,10 +11,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const ( - operationPrefixLDAP = "ldap" - errUserBindFailed = "ldap operation failed: failed to bind as user" -) +const errUserBindFailed = `ldap operation failed: failed to bind as user` func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() @@ -113,7 +107,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri if b.Logger().IsDebug() { b.Logger().Debug("ldap bind failed", "error", err) } - return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials + return "", nil, logical.ErrorResponse(errUserBindFailed), nil, nil } // We re-bind to the BindDN if it's defined because we assume @@ -123,7 +117,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri if b.Logger().IsDebug() { b.Logger().Debug("error while attempting to re-bind with the BindDN User", "error", err) } - return "", nil, logical.ErrorResponse("ldap operation failed: failed to re-bind with the BindDN user"), nil, logical.ErrInvalidCredentials + return "", nil, logical.ErrorResponse("ldap operation failed: failed to re-bind with the BindDN user"), nil, nil } if b.Logger().IsDebug() { b.Logger().Debug("re-bound to original binddn") diff --git a/builtin/credential/ldap/backend_test.go b/builtin/credential/ldap/backend_test.go index beda248e4b090..74dfdf99ed074 100644 --- a/builtin/credential/ldap/backend_test.go +++ b/builtin/credential/ldap/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ldap import ( @@ -1242,7 +1239,6 @@ func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { RequestTimeout: cfg.RequestTimeout, ConnectionTimeout: cfg.ConnectionTimeout, UsernameAsAlias: false, - DerefAliases: "never", MaximumPageSize: 1000, }, } diff --git a/builtin/credential/ldap/cli.go b/builtin/credential/ldap/cli.go index 772603434940a..e0d744b4caadb 100644 --- a/builtin/credential/ldap/cli.go +++ b/builtin/credential/ldap/cli.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ldap import ( @@ -29,15 +26,12 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro } password, ok := m["password"] if !ok { - password = passwordFromEnv() - if password == "" { - fmt.Fprintf(os.Stderr, "Password (will be hidden): ") - var err error - password, err = pwd.Read(os.Stdin) - fmt.Fprintf(os.Stderr, "\n") - if err != nil { - return nil, err - } + fmt.Fprintf(os.Stderr, "Password (will be hidden): ") + var err error + password, err = pwd.Read(os.Stdin) + fmt.Fprintf(os.Stderr, "\n") + if err != nil { + return nil, err } } @@ -76,9 +70,8 @@ Usage: vault login -method=ldap [CONFIG K=V...] Configuration: password= - LDAP password to use for authentication. If not provided, it will use - the VAULT_LDAP_PASSWORD environment variable. If this is not set, the - CLI will prompt for this on stdin. + LDAP password to use for authentication. If not provided, the CLI will + prompt for this on stdin. username= LDAP username to use for authentication. 
@@ -96,7 +89,3 @@ func usernameFromEnv() string { } return "" } - -func passwordFromEnv() string { - return os.Getenv("VAULT_LDAP_PASSWORD") -} diff --git a/builtin/credential/ldap/cmd/ldap/main.go b/builtin/credential/ldap/cmd/ldap/main.go index 2dcb802e20927..b632c011ce136 100644 --- a/builtin/credential/ldap/cmd/ldap/main.go +++ b/builtin/credential/ldap/cmd/ldap/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: ldap.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/ldap/path_config.go b/builtin/credential/ldap/path_config.go index a06c6666f8857..45e5294c79d93 100644 --- a/builtin/credential/ldap/path_config.go +++ b/builtin/credential/ldap/path_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ldap import ( @@ -19,31 +16,18 @@ const userFilterWarning = "userfilter configured does not consider userattr and func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: `config`, + Fields: ldaputil.ConfigFields(), - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixLDAP, - Action: "Configure", - }, - - Fields: ldaputil.ConfigFields(), - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "auth-configuration", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure-auth", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathConfigRead, + logical.UpdateOperation: b.pathConfigWrite, }, HelpSynopsis: pathConfigHelpSyn, HelpDescription: pathConfigHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Configure", + }, } tokenutil.AddTokenFields(p.Fields) diff --git a/builtin/credential/ldap/path_groups.go b/builtin/credential/ldap/path_groups.go index 08ac00d7fd2c2..b39691cf8174f 100644 --- a/builtin/credential/ldap/path_groups.go +++ b/builtin/credential/ldap/path_groups.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ldap import ( @@ -16,33 +13,22 @@ func pathGroupsList(b *backend) *framework.Path { return &framework.Path{ Pattern: "groups/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixLDAP, - OperationSuffix: "groups", - Navigation: true, - ItemType: "Group", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathGroupList, }, HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Navigation: true, + ItemType: "Group", + }, } } func pathGroups(b *backend) *framework.Path { return &framework.Path{ Pattern: `groups/(?P.+)`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixLDAP, - OperationSuffix: "group", - Action: "Create", - ItemType: "Group", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -52,9 +38,6 @@ func pathGroups(b *backend) *framework.Path { "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the group.", - DisplayAttrs: &framework.DisplayAttributes{ - Description: "A list of policies associated to the group.", - }, }, }, @@ -66,6 +49,10 @@ func pathGroups(b *backend) *framework.Path { HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Create", + ItemType: "Group", + }, } } diff --git a/builtin/credential/ldap/path_login.go b/builtin/credential/ldap/path_login.go index 440e725399212..67303911e5a17 100644 --- a/builtin/credential/ldap/path_login.go +++ b/builtin/credential/ldap/path_login.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ldap import ( @@ -16,12 +13,6 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: `login/(?P.+)`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixLDAP, - OperationVerb: "login", - }, - Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, diff --git a/builtin/credential/ldap/path_users.go b/builtin/credential/ldap/path_users.go index 1ce252d1df558..a4e18d30eb6d8 100644 --- a/builtin/credential/ldap/path_users.go +++ b/builtin/credential/ldap/path_users.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ldap import ( @@ -17,33 +14,22 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixLDAP, - OperationSuffix: "users", - Navigation: true, - ItemType: "User", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Navigation: true, + ItemType: "User", + }, } } func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixLDAP, - OperationSuffix: "user", - Action: "Create", - ItemType: "User", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -53,17 +39,11 @@ func pathUsers(b *backend) *framework.Path { "groups": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of additional groups associated with the user.", - DisplayAttrs: &framework.DisplayAttributes{ - Description: "A list of additional groups associated with the user.", - }, }, "policies": { Type: framework.TypeCommaStringSlice, Description: 
"Comma-separated list of policies associated with the user.", - DisplayAttrs: &framework.DisplayAttributes{ - Description: "A list of policies associated with the user.", - }, }, }, @@ -75,6 +55,10 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Create", + ItemType: "User", + }, } } diff --git a/builtin/credential/okta/backend.go b/builtin/credential/okta/backend.go index 04dba968ab36f..d7ac1d8cbcf48 100644 --- a/builtin/credential/okta/backend.go +++ b/builtin/credential/okta/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package okta import ( @@ -18,9 +15,8 @@ import ( ) const ( - operationPrefixOkta = "okta" - mfaPushMethod = "push" - mfaTOTPMethod = "token:software:totp" + mfaPushMethod = "push" + mfaTOTPMethod = "token:software:totp" ) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { @@ -271,12 +267,10 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username, pas return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil, nil } - timer := time.NewTimer(1 * time.Second) select { - case <-timer.C: + case <-time.After(1 * time.Second): // Continue case <-ctx.Done(): - timer.Stop() return nil, logical.ErrorResponse("exiting pending mfa challenge"), nil, nil } case "REJECTED": diff --git a/builtin/credential/okta/backend_test.go b/builtin/credential/okta/backend_test.go index 85642e802a498..ee1588ee25ac5 100644 --- a/builtin/credential/okta/backend_test.go +++ b/builtin/credential/okta/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package okta import ( @@ -12,7 +9,6 @@ import ( "time" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/helper/policyutil" @@ -39,15 +35,6 @@ func TestBackend_Config(t *testing.T) { if os.Getenv("VAULT_ACC") == "" { t.SkipNow() } - - // Ensure each cred is populated. - credNames := []string{ - "OKTA_USERNAME", - "OKTA_PASSWORD", - "OKTA_API_TOKEN", - } - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - defaultLeaseTTLVal := time.Hour * 12 maxLeaseTTLVal := time.Hour * 24 b, err := Factory(context.Background(), &logical.BackendConfig{ diff --git a/builtin/credential/okta/cli.go b/builtin/credential/okta/cli.go index df252960a2c22..f6e3d13b73c22 100644 --- a/builtin/credential/okta/cli.go +++ b/builtin/credential/okta/cli.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package okta import ( @@ -64,12 +61,10 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro go func() { for { - timer := time.NewTimer(time.Second) select { case <-doneCh: - timer.Stop() return - case <-timer.C: + case <-time.After(time.Second): } resp, _ := c.Logical().Read(fmt.Sprintf("auth/%s/verify/%s", mount, nonce)) diff --git a/builtin/credential/okta/cmd/okta/main.go b/builtin/credential/okta/cmd/okta/main.go index e28b34a016c8c..e2452ba4b8ad9 100644 --- a/builtin/credential/okta/cmd/okta/main.go +++ b/builtin/credential/okta/cmd/okta/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: okta.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/okta/path_config.go b/builtin/credential/okta/path_config.go index 045d4fdb37792..7fc93efb87c7d 100644 --- a/builtin/credential/okta/path_config.go +++ b/builtin/credential/okta/path_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package okta import ( @@ -27,12 +24,6 @@ const ( func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: `config`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixOkta, - Action: "Configure", - }, - Fields: map[string]*framework.FieldSchema{ "organization": { Type: framework.TypeString, @@ -89,30 +80,18 @@ func pathConfig(b *backend) *framework.Path { }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "configuration", - }, - }, - logical.CreateOperation: &framework.PathOperation{ - Callback: b.pathConfigWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: 
b.pathConfigRead, + logical.CreateOperation: b.pathConfigWrite, + logical.UpdateOperation: b.pathConfigWrite, }, ExistenceCheck: b.pathConfigExistenceCheck, HelpSynopsis: pathConfigHelp, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Configure", + }, } tokenutil.AddTokenFields(p.Fields) diff --git a/builtin/credential/okta/path_groups.go b/builtin/credential/okta/path_groups.go index 753c1cacdd517..f9ff0225ac98c 100644 --- a/builtin/credential/okta/path_groups.go +++ b/builtin/credential/okta/path_groups.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package okta import ( @@ -16,33 +13,22 @@ func pathGroupsList(b *backend) *framework.Path { return &framework.Path{ Pattern: "groups/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixOkta, - OperationSuffix: "groups", - Navigation: true, - ItemType: "Group", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathGroupList, }, HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Navigation: true, + ItemType: "Group", + }, } } func pathGroups(b *backend) *framework.Path { return &framework.Path{ Pattern: `groups/(?P.+)`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixOkta, - OperationSuffix: "group", - Action: "Create", - ItemType: "Group", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -52,9 +38,6 @@ func pathGroups(b *backend) *framework.Path { "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the group.", - DisplayAttrs: &framework.DisplayAttributes{ - Description: "A list of policies associated to the group.", - }, }, }, @@ -66,6 +49,10 @@ func pathGroups(b *backend) *framework.Path { HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, + DisplayAttrs: 
&framework.DisplayAttributes{ + Action: "Create", + ItemType: "Group", + }, } } diff --git a/builtin/credential/okta/path_groups_test.go b/builtin/credential/okta/path_groups_test.go index 8e4ba8cc2d5d6..84253f379fd8a 100644 --- a/builtin/credential/okta/path_groups_test.go +++ b/builtin/credential/okta/path_groups_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package okta import ( diff --git a/builtin/credential/okta/path_login.go b/builtin/credential/okta/path_login.go index 1f2cb090a76c5..0f8967576bb7c 100644 --- a/builtin/credential/okta/path_login.go +++ b/builtin/credential/okta/path_login.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package okta import ( @@ -23,12 +20,6 @@ const ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: `login/(?P.+)`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixOkta, - OperationVerb: "login", - }, - Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -198,10 +189,6 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f func pathVerify(b *backend) *framework.Path { return &framework.Path{ Pattern: `verify/(?P.+)`, - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixOkta, - OperationVerb: "verify", - }, Fields: map[string]*framework.FieldSchema{ "nonce": { Type: framework.TypeString, diff --git a/builtin/credential/okta/path_users.go b/builtin/credential/okta/path_users.go index 3c38e855548b3..bd5fdc0ebbe0d 100644 --- a/builtin/credential/okta/path_users.go +++ b/builtin/credential/okta/path_users.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package okta import ( @@ -14,33 +11,22 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixOkta, - OperationSuffix: "users", - Navigation: true, - ItemType: "User", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Navigation: true, + ItemType: "User", + }, } } func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixOkta, - OperationSuffix: "user", - Action: "Create", - ItemType: "User", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -66,6 +52,10 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Create", + ItemType: "User", + }, } } diff --git a/builtin/credential/radius/backend.go b/builtin/credential/radius/backend.go index 3ec37a6fe634d..03da06efd9bbf 100644 --- a/builtin/credential/radius/backend.go +++ b/builtin/credential/radius/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package radius import ( @@ -10,8 +7,6 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const operationPrefixRadius = "radius" - func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { diff --git a/builtin/credential/radius/backend_test.go b/builtin/credential/radius/backend_test.go index 9e9567f470645..de90b3b79d6e9 100644 --- a/builtin/credential/radius/backend_test.go +++ b/builtin/credential/radius/backend_test.go @@ -1,22 +1,21 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package radius import ( "context" "fmt" + "io" "os" "reflect" - "runtime" "strconv" "strings" "testing" "time" + "github.com/hashicorp/vault/helper/testhelpers/docker" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/logical" + + "github.com/docker/docker/api/types" ) const ( @@ -31,15 +30,40 @@ const ( ) func prepareRadiusTestContainer(t *testing.T) (func(), string, int) { - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as this image is not supported on ARM architectures") - } - if os.Getenv(envRadiusRadiusHost) != "" { port, _ := strconv.Atoi(os.Getenv(envRadiusPort)) return func() {}, os.Getenv(envRadiusRadiusHost), port } + radiusdOptions := []string{"radiusd", "-f", "-l", "stdout", "-X"} + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: "jumanjiman/radiusd", + ImageTag: "latest", + ContainerName: "radiusd", + // Switch the entry point for this operation; we want to sleep + // instead of exec'ing radiusd, as we first need to write a new + // client configuration. radiusd's SIGHUP handler does not reload + // this config file, hence we choose to manually start radiusd + // below. 
+ Entrypoint: []string{"sleep", "3600"}, + Ports: []string{"1812/udp"}, + LogConsumer: func(s string) { + if t.Failed() { + t.Logf("container logs: %s", s) + } + }, + }) + if err != nil { + t.Fatalf("Could not start docker radiusd: %s", err) + } + + svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + return docker.NewServiceHostPort(host, port), nil + }) + if err != nil { + t.Fatalf("Could not start docker radiusd: %s", err) + } + // Now allow any client to connect to this radiusd instance by writing our // own clients.conf file. // @@ -50,8 +74,7 @@ func prepareRadiusTestContainer(t *testing.T) (func(), string, int) { // // See also: https://freeradius.org/radiusd/man/clients.conf.html ctx := context.Background() - clientsConfig := ` -client 0.0.0.0/1 { + clientsConfig := `client 0.0.0.0/1 { ipaddr = 0.0.0.0/1 secret = testing123 shortname = all-clients-first @@ -61,55 +84,45 @@ client 128.0.0.0/1 { ipaddr = 128.0.0.0/1 secret = testing123 shortname = all-clients-second -} -` - - containerfile := ` -FROM docker.mirror.hashicorp.services/jumanjiman/radiusd:latest - -COPY clients.conf /etc/raddb/clients.conf -` - - bCtx := docker.NewBuildContext() - bCtx["clients.conf"] = docker.PathContentsFromBytes([]byte(clientsConfig)) - - imageName := "vault_radiusd_any_client" - imageTag := "latest" - - runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: imageName, - ImageTag: imageTag, - ContainerName: "radiusd", - Cmd: []string{"-f", "-l", "stdout", "-X"}, - Ports: []string{"1812/udp"}, - LogConsumer: func(s string) { - if t.Failed() { - t.Logf("container logs: %s", s) - } - }, +}` + ret, err := runner.DockerAPI.ContainerExecCreate(ctx, svc.Container.ID, types.ExecConfig{ + User: "0", + AttachStderr: true, + AttachStdout: true, + // Hack: write this via echo, since it exists in the container. 
+ Cmd: []string{"sh", "-c", "echo '" + clientsConfig + "' > /etc/raddb/clients.conf"}, }) if err != nil { - t.Fatalf("Could not provision docker service runner: %s", err) + t.Fatalf("Failed to update radiusd client config: error creating command: %v", err) } - - output, err := runner.BuildImage(ctx, containerfile, bCtx, - docker.BuildRemove(true), docker.BuildForceRemove(true), - docker.BuildPullParent(true), - docker.BuildTags([]string{imageName + ":" + imageTag})) + resp, err := runner.DockerAPI.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{}) if err != nil { - t.Fatalf("Could not build new image: %v", err) + t.Fatalf("Failed to update radiusd client config: error attaching command: %v", err) } - - t.Logf("Image build output: %v", string(output)) - - svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { - time.Sleep(2 * time.Second) - return docker.NewServiceHostPort(host, port), nil + read, err := io.ReadAll(resp.Reader) + t.Logf("Command Output (%v):\n%v", err, string(read)) + + ret, err = runner.DockerAPI.ContainerExecCreate(ctx, svc.Container.ID, types.ExecConfig{ + User: "0", + AttachStderr: true, + AttachStdout: true, + // As noted above, we need to start radiusd manually now. + Cmd: radiusdOptions, }) if err != nil { - t.Fatalf("Could not start docker radiusd: %s", err) + t.Fatalf("Failed to start radiusd service: error creating command: %v", err) + } + err = runner.DockerAPI.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{}) + if err != nil { + t.Fatalf("Failed to start radiusd service: error starting command: %v", err) } + // Give radiusd time to start... + // + // There's no straightfoward way to check the state, but the server starts + // up quick so a 2 second sleep should be enough. 
+ time.Sleep(2 * time.Second) + pieces := strings.Split(svc.Config.Address(), ":") port, _ := strconv.Atoi(pieces[1]) return svc.Cleanup, pieces[0], port diff --git a/builtin/credential/radius/cmd/radius/main.go b/builtin/credential/radius/cmd/radius/main.go index b3045a31a1a73..9ab5a636948c7 100644 --- a/builtin/credential/radius/cmd/radius/main.go +++ b/builtin/credential/radius/cmd/radius/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: radius.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/radius/path_config.go b/builtin/credential/radius/path_config.go index 6bdc2967361d2..33d4d0d99175d 100644 --- a/builtin/credential/radius/path_config.go +++ b/builtin/credential/radius/path_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package radius import ( @@ -15,12 +12,6 @@ import ( func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: "config", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixRadius, - Action: "Configure", - }, - Fields: map[string]*framework.FieldSchema{ "host": { Type: framework.TypeString, @@ -44,10 +35,9 @@ func pathConfig(b *backend) *framework.Path { "unregistered_user_policies": { Type: framework.TypeString, Default: "", - Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)", + Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregisted user (default: empty)", DisplayAttrs: &framework.DisplayAttributes{ - Name: "Policies for unregistered users", - Description: "List of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)", + Name: "Policies for unregistered users", }, }, "dial_timeout": { @@ -87,29 +77,17 @@ func pathConfig(b *backend) *framework.Path { ExistenceCheck: b.configExistenceCheck, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "configuration", - }, - }, - logical.CreateOperation: &framework.PathOperation{ - Callback: b.pathConfigCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigCreateUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathConfigRead, + logical.CreateOperation: b.pathConfigCreateUpdate, + logical.UpdateOperation: b.pathConfigCreateUpdate, }, HelpSynopsis: 
pathConfigHelpSyn, HelpDescription: pathConfigHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Configure", + }, } tokenutil.AddTokenFields(p.Fields) diff --git a/builtin/credential/radius/path_login.go b/builtin/credential/radius/path_login.go index 6feaf1bfcaf40..c8a1ab8f43ed9 100644 --- a/builtin/credential/radius/path_login.go +++ b/builtin/credential/radius/path_login.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package radius import ( @@ -23,13 +20,6 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login" + framework.OptionalParamRegex("urlusername"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixRadius, - OperationVerb: "login", - OperationSuffix: "|with-username", - }, - Fields: map[string]*framework.FieldSchema{ "urlusername": { Type: framework.TypeString, diff --git a/builtin/credential/radius/path_users.go b/builtin/credential/radius/path_users.go index 63ac5bbc2e878..de7b5d4690e6e 100644 --- a/builtin/credential/radius/path_users.go +++ b/builtin/credential/radius/path_users.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package radius import ( @@ -17,33 +14,22 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixRadius, - OperationSuffix: "users", - Navigation: true, - ItemType: "User", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Navigation: true, + ItemType: "User", + }, } } func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixRadius, - OperationSuffix: "user", - Action: "Create", - ItemType: "User", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -53,9 +39,6 @@ func pathUsers(b *backend) *framework.Path { "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the user.", - DisplayAttrs: &framework.DisplayAttributes{ - Description: "A list of policies associated to the user.", - }, }, }, @@ -70,6 +53,10 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Create", + ItemType: "User", + }, } } diff --git a/builtin/credential/token/cli.go b/builtin/credential/token/cli.go index 853d6eade7c45..64a88169cbe7e 100644 --- a/builtin/credential/token/cli.go +++ b/builtin/credential/token/cli.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package token import ( diff --git a/builtin/credential/userpass/backend.go b/builtin/credential/userpass/backend.go index 428e8b25ca766..aa45dc3766db2 100644 --- a/builtin/credential/userpass/backend.go +++ b/builtin/credential/userpass/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package userpass import ( @@ -10,8 +7,6 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const operationPrefixUserpass = "userpass" - func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { diff --git a/builtin/credential/userpass/backend_test.go b/builtin/credential/userpass/backend_test.go index 3df8cfa2a9bb5..83f79db9a4e1e 100644 --- a/builtin/credential/userpass/backend_test.go +++ b/builtin/credential/userpass/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package userpass import ( diff --git a/builtin/credential/userpass/cli.go b/builtin/credential/userpass/cli.go index e100ae9f244e5..092d0927ef1f8 100644 --- a/builtin/credential/userpass/cli.go +++ b/builtin/credential/userpass/cli.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package userpass import ( diff --git a/builtin/credential/userpass/cmd/userpass/main.go b/builtin/credential/userpass/cmd/userpass/main.go index 4747a56f44097..5ea1894d219e5 100644 --- a/builtin/credential/userpass/cmd/userpass/main.go +++ b/builtin/credential/userpass/cmd/userpass/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -19,11 +16,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: userpass.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/userpass/path_login.go b/builtin/credential/userpass/path_login.go index 0565864471e02..95d63f28c6260 100644 --- a/builtin/credential/userpass/path_login.go +++ b/builtin/credential/userpass/path_login.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package userpass import ( @@ -19,12 +16,6 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login/" + framework.GenericNameRegex("username"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixUserpass, - OperationVerb: "login", - }, - Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -48,7 +39,7 @@ func pathLogin(b *backend) *framework.Path { } func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - username := d.Get("username").(string) + username := strings.ToLower(d.Get("username").(string)) if username == "" { return nil, fmt.Errorf("missing username") } @@ -95,26 +86,14 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew // Check for a password match. Check for a hash collision for Vault 0.2+, // but handle the older legacy passwords with a constant time comparison. 
passwordBytes := []byte(password) - switch { - case !legacyPassword: + if !legacyPassword { if err := bcrypt.CompareHashAndPassword(userPassword, passwordBytes); err != nil { - // The failed login info of existing users alone are tracked as only - // existing user's failed login information is stored in storage for optimization - if user == nil || userError != nil { - return logical.ErrorResponse("invalid username or password"), nil - } - return logical.ErrorResponse("invalid username or password"), logical.ErrInvalidCredentials + return logical.ErrorResponse("invalid username or password"), nil } - default: + } else { if subtle.ConstantTimeCompare(userPassword, passwordBytes) != 1 { - // The failed login info of existing users alone are tracked as only - // existing user's failed login information is stored in storage for optimization - if user == nil || userError != nil { - return logical.ErrorResponse("invalid username or password"), nil - } - return logical.ErrorResponse("invalid username or password"), logical.ErrInvalidCredentials + return logical.ErrorResponse("invalid username or password"), nil } - } if userError != nil { diff --git a/builtin/credential/userpass/path_user_password.go b/builtin/credential/userpass/path_user_password.go index 63b52ca0ca1a4..5007497304789 100644 --- a/builtin/credential/userpass/path_user_password.go +++ b/builtin/credential/userpass/path_user_password.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package userpass import ( @@ -16,13 +13,6 @@ import ( func pathUserPassword(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username") + "/password$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixUserpass, - OperationVerb: "reset", - OperationSuffix: "password", - }, - Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, diff --git a/builtin/credential/userpass/path_user_policies.go b/builtin/credential/userpass/path_user_policies.go index 8c5a9174ae966..3c017253869e2 100644 --- a/builtin/credential/userpass/path_user_policies.go +++ b/builtin/credential/userpass/path_user_policies.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package userpass import ( @@ -16,13 +13,6 @@ import ( func pathUserPolicies(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username") + "/policies$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixUserpass, - OperationVerb: "update", - OperationSuffix: "policies", - }, - Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -36,9 +26,6 @@ func pathUserPolicies(b *backend) *framework.Path { "token_policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", - DisplayAttrs: &framework.DisplayAttributes{ - Description: "A list of policies that will apply to the generated token for this user.", - }, }, }, diff --git a/builtin/credential/userpass/path_users.go b/builtin/credential/userpass/path_users.go index 221fc2c4fcbf3..7ec22c5fbd455 100644 --- a/builtin/credential/userpass/path_users.go +++ b/builtin/credential/userpass/path_users.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package userpass import ( @@ -19,33 +16,22 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixUserpass, - OperationSuffix: "users", - Navigation: true, - ItemType: "User", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Navigation: true, + ItemType: "User", + }, } } func pathUsers(b *backend) *framework.Path { p := &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixUserpass, - OperationSuffix: "user", - Action: "Create", - ItemType: "User", - }, - Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -96,6 +82,10 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, + DisplayAttrs: &framework.DisplayAttributes{ + Action: "Create", + ItemType: "User", + }, } tokenutil.AddTokenFields(p.Fields) diff --git a/builtin/credential/userpass/stepwise_test.go b/builtin/credential/userpass/stepwise_test.go index ab797ed200c96..90820b883d273 100644 --- a/builtin/credential/userpass/stepwise_test.go +++ b/builtin/credential/userpass/stepwise_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package userpass import ( @@ -19,7 +16,7 @@ func TestAccBackend_stepwise_UserCrud(t *testing.T) { customPluginName := "my-userpass" envOptions := &stepwise.MountOptions{ RegistryName: customPluginName, - PluginType: api.PluginTypeCredential, + PluginType: stepwise.PluginTypeCredential, PluginName: "userpass", MountPathPrefix: customPluginName, } diff --git a/builtin/logical/aws/backend.go b/builtin/logical/aws/backend.go index d93c394f98fd1..9c5abe1e82e80 100644 --- a/builtin/logical/aws/backend.go +++ b/builtin/logical/aws/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -12,29 +9,24 @@ import ( "github.com/aws/aws-sdk-go/service/iam/iamiface" "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/queue" ) const ( rootConfigPath = "config/root" minAwsUserRollbackAge = 5 * time.Minute - operationPrefixAWS = "aws" - operationPrefixAWSASD = "aws-config" ) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend(conf) + b := Backend() if err := b.Setup(ctx, conf); err != nil { return nil, err } return b, nil } -func Backend(conf *logical.BackendConfig) *backend { +func Backend() *backend { var b backend - b.credRotationQueue = queue.New() b.Backend = &framework.Backend{ Help: strings.TrimSpace(backendHelp), @@ -43,8 +35,7 @@ func Backend(conf *logical.BackendConfig) *backend { framework.WALPrefix, }, SealWrapStorage: []string{ - rootConfigPath, - pathStaticCreds + "/", + "config/root", }, }, @@ -54,8 +45,6 @@ func Backend(conf *logical.BackendConfig) *backend { pathConfigLease(&b), pathRoles(&b), pathListRoles(&b), - pathStaticRoles(&b), - pathStaticCredentials(&b), pathUser(&b), }, @@ -66,17 +55,7 @@ func Backend(conf *logical.BackendConfig) *backend 
{ Invalidate: b.invalidate, WALRollback: b.walRollback, WALRollbackMinAge: minAwsUserRollbackAge, - PeriodicFunc: func(ctx context.Context, req *logical.Request) error { - repState := conf.System.ReplicationState() - if (conf.System.LocalMount() || - !repState.HasState(consts.ReplicationPerformanceSecondary)) && - !repState.HasState(consts.ReplicationDRSecondary) && - !repState.HasState(consts.ReplicationPerformanceStandby) { - return b.rotateExpiredStaticCreds(ctx, req) - } - return nil - }, - BackendType: logical.TypeLogical, + BackendType: logical.TypeLogical, } return &b @@ -95,10 +74,6 @@ type backend struct { // to enable mocking with AWS iface for tests iamClient iamiface.IAMAPI stsClient stsiface.STSAPI - - // the age of a static role's credential is tracked by a priority queue and handled - // by the PeriodicFunc - credRotationQueue *queue.PriorityQueue } const backendHelp = ` diff --git a/builtin/logical/aws/backend_test.go b/builtin/logical/aws/backend_test.go index 260bcc6d645b7..4ca6e66c916d2 100644 --- a/builtin/logical/aws/backend_test.go +++ b/builtin/logical/aws/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -15,11 +12,9 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" @@ -40,7 +35,7 @@ type mockIAMClient struct { iamiface.IAMAPI } -func (m *mockIAMClient) CreateUserWithContext(_ aws.Context, input *iam.CreateUserInput, _ ...request.Option) (*iam.CreateUserOutput, error) { +func (m *mockIAMClient) CreateUser(input *iam.CreateUserInput) (*iam.CreateUserOutput, error) { return nil, awserr.New("Throttling", "", nil) } @@ -148,7 +143,7 @@ func TestBackend_throttled(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend(config) + b := Backend() if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -200,10 +195,6 @@ func TestBackend_throttled(t *testing.T) { } func testAccPreCheck(t *testing.T) { - if !hasAWSCredentials() { - t.Skip("Skipping because AWS credentials could not be resolved. 
See https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials for information on how to set up AWS credentials.") - } - initSetup.Do(func() { if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" { log.Println("[INFO] Test: Using us-west-2 as test region") @@ -212,23 +203,6 @@ func testAccPreCheck(t *testing.T) { }) } -func hasAWSCredentials() bool { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - cfg, err := config.LoadDefaultConfig(ctx) - if err != nil { - return false - } - - creds, err := cfg.Credentials.Retrieve(ctx) - if err != nil { - return false - } - - return creds.HasKeys() -} - func getAccountID() (string, error) { awsConfig := &aws.Config{ Region: aws.String("us-east-1"), @@ -261,11 +235,8 @@ func createRole(t *testing.T, roleName, awsAccountID string, policyARNs []string "Principal": { "AWS": "arn:aws:iam::%s:root" }, - "Action": [ - "sts:AssumeRole", - "sts:SetSourceIdentity" - ] - } + "Action": "sts:AssumeRole" + } ] } ` @@ -684,7 +655,7 @@ func testAccStepRead(t *testing.T, path, name string, credentialTests []credenti } } -func testAccStepReadSTSResponse(name string, maximumTTL time.Duration) logicaltest.TestStep { +func testAccStepReadTTL(name string, maximumTTL time.Duration) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: "creds/" + name, @@ -1345,7 +1316,7 @@ func TestAcceptanceBackend_RoleDefaultSTSTTL(t *testing.T) { Steps: []logicaltest.TestStep{ testAccStepConfig(t), testAccStepWriteRole(t, "test", roleData), - testAccStepReadSTSResponse("test", time.Duration(minAwsAssumeRoleDuration)*time.Second), // allow a little slack + testAccStepReadTTL("test", time.Duration(minAwsAssumeRoleDuration)*time.Second), // allow a little slack }, Teardown: func() error { return deleteTestRole(roleName) diff --git a/builtin/logical/aws/client.go b/builtin/logical/aws/client.go index 71d24f3abb2e8..80d839ed55edf 100644 --- 
a/builtin/logical/aws/client.go +++ b/builtin/logical/aws/client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package aws import ( diff --git a/builtin/logical/aws/cmd/aws/main.go b/builtin/logical/aws/cmd/aws/main.go index 28de1eb3f8d93..74f7d97a7b868 100644 --- a/builtin/logical/aws/cmd/aws/main.go +++ b/builtin/logical/aws/cmd/aws/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: aws.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/aws/iam_policies.go b/builtin/logical/aws/iam_policies.go index 002a7389eb161..caf79e33d310b 100644 --- a/builtin/logical/aws/iam_policies.go +++ b/builtin/logical/aws/iam_policies.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -73,7 +70,7 @@ func (b *backend) getGroupPolicies(ctx context.Context, s logical.Storage, iamGr for _, g := range iamGroups { // Collect managed policy ARNs from the IAM Group - agp, err = iamClient.ListAttachedGroupPoliciesWithContext(ctx, &iam.ListAttachedGroupPoliciesInput{ + agp, err = iamClient.ListAttachedGroupPolicies(&iam.ListAttachedGroupPoliciesInput{ GroupName: aws.String(g), }) if err != nil { @@ -84,14 +81,14 @@ func (b *backend) getGroupPolicies(ctx context.Context, s logical.Storage, iamGr } // Collect inline policy names from the IAM Group - inlinePolicies, err = iamClient.ListGroupPoliciesWithContext(ctx, &iam.ListGroupPoliciesInput{ + inlinePolicies, err = iamClient.ListGroupPolicies(&iam.ListGroupPoliciesInput{ GroupName: aws.String(g), }) if err != nil { return nil, nil, err } for _, iP := range inlinePolicies.PolicyNames { - inlinePolicyDoc, err = iamClient.GetGroupPolicyWithContext(ctx, &iam.GetGroupPolicyInput{ + inlinePolicyDoc, err = iamClient.GetGroupPolicy(&iam.GetGroupPolicyInput{ GroupName: &g, PolicyName: iP, }) diff --git a/builtin/logical/aws/iam_policies_test.go b/builtin/logical/aws/iam_policies_test.go index 7f8f96adb87ca..ddba67f6b8bdc 100644 --- a/builtin/logical/aws/iam_policies_test.go +++ b/builtin/logical/aws/iam_policies_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -8,7 +5,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam/iamiface" "github.com/hashicorp/vault/sdk/logical" @@ -30,15 +26,15 @@ type mockGroupIAMClient struct { GetGroupPolicyResp iam.GetGroupPolicyOutput } -func (m mockGroupIAMClient) ListAttachedGroupPoliciesWithContext(_ aws.Context, in *iam.ListAttachedGroupPoliciesInput, _ ...request.Option) (*iam.ListAttachedGroupPoliciesOutput, error) { +func (m mockGroupIAMClient) ListAttachedGroupPolicies(in *iam.ListAttachedGroupPoliciesInput) (*iam.ListAttachedGroupPoliciesOutput, error) { return &m.ListAttachedGroupPoliciesResp, nil } -func (m mockGroupIAMClient) ListGroupPoliciesWithContext(_ aws.Context, in *iam.ListGroupPoliciesInput, _ ...request.Option) (*iam.ListGroupPoliciesOutput, error) { +func (m mockGroupIAMClient) ListGroupPolicies(in *iam.ListGroupPoliciesInput) (*iam.ListGroupPoliciesOutput, error) { return &m.ListGroupPoliciesResp, nil } -func (m mockGroupIAMClient) GetGroupPolicyWithContext(_ aws.Context, in *iam.GetGroupPolicyInput, _ ...request.Option) (*iam.GetGroupPolicyOutput, error) { +func (m mockGroupIAMClient) GetGroupPolicy(in *iam.GetGroupPolicyInput) (*iam.GetGroupPolicyOutput, error) { return &m.GetGroupPolicyResp, nil } @@ -141,7 +137,7 @@ func Test_getGroupPolicies(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend(config) + b := Backend() if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } diff --git a/builtin/logical/aws/path_config_lease.go b/builtin/logical/aws/path_config_lease.go index 1b01388a3b8a6..b953b2305e3c6 100644 --- a/builtin/logical/aws/path_config_lease.go +++ b/builtin/logical/aws/path_config_lease.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -15,11 +12,6 @@ import ( func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - }, - Fields: map[string]*framework.FieldSchema{ "lease": { Type: framework.TypeString, @@ -32,20 +24,9 @@ func pathConfigLease(b *backend) *framework.Path { }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathLeaseRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "lease-configuration", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathLeaseWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "lease", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathLeaseRead, + logical.UpdateOperation: b.pathLeaseWrite, }, HelpSynopsis: pathConfigLeaseHelpSyn, diff --git a/builtin/logical/aws/path_config_root.go b/builtin/logical/aws/path_config_root.go index bd6c09e0e5e3b..1262980fa8066 100644 --- a/builtin/logical/aws/path_config_root.go +++ b/builtin/logical/aws/path_config_root.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -17,11 +14,6 @@ const defaultUserNameTemplate = `{{ if (eq .Type "STS") }}{{ printf "vault-%s-%s func pathConfigRoot(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/root", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - }, - Fields: map[string]*framework.FieldSchema{ "access_key": { Type: framework.TypeString, @@ -56,20 +48,9 @@ func pathConfigRoot(b *backend) *framework.Path { }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigRootRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "root-iam-credentials-configuration", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigRootWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "root-iam-credentials", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathConfigRootRead, + logical.UpdateOperation: b.pathConfigRootWrite, }, HelpSynopsis: pathConfigRootHelpSyn, diff --git a/builtin/logical/aws/path_config_root_test.go b/builtin/logical/aws/path_config_root_test.go index a007064904179..d90ee6cacb38d 100644 --- a/builtin/logical/aws/path_config_root_test.go +++ b/builtin/logical/aws/path_config_root_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -15,7 +12,7 @@ func TestBackend_PathConfigRoot(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend(config) + b := Backend() if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } diff --git a/builtin/logical/aws/path_config_rotate_root.go b/builtin/logical/aws/path_config_rotate_root.go index 0434d22e54c8c..1f7ca31133661 100644 --- a/builtin/logical/aws/path_config_rotate_root.go +++ b/builtin/logical/aws/path_config_rotate_root.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -16,13 +13,6 @@ import ( func pathConfigRotateRoot(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/rotate-root", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "root-iam-credentials", - OperationVerb: "rotate", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigRotateRootUpdate, @@ -66,7 +56,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R } var getUserInput iam.GetUserInput // empty input means get current user - getUserRes, err := client.GetUserWithContext(ctx, &getUserInput) + getUserRes, err := client.GetUser(&getUserInput) if err != nil { return nil, fmt.Errorf("error calling GetUser: %w", err) } @@ -83,7 +73,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R createAccessKeyInput := iam.CreateAccessKeyInput{ UserName: getUserRes.User.UserName, } - createAccessKeyRes, err := client.CreateAccessKeyWithContext(ctx, &createAccessKeyInput) + createAccessKeyRes, err := client.CreateAccessKey(&createAccessKeyInput) if err != nil { return nil, fmt.Errorf("error calling CreateAccessKey: %w", err) } @@ -114,7 +104,7 @@ func (b *backend) 
pathConfigRotateRootUpdate(ctx context.Context, req *logical.R AccessKeyId: aws.String(oldAccessKey), UserName: getUserRes.User.UserName, } - _, err = client.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput) + _, err = client.DeleteAccessKey(&deleteAccessKeyInput) if err != nil { return nil, fmt.Errorf("error deleting old access key: %w", err) } diff --git a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go index 67545e641434d..a7c3dd84a8967 100644 --- a/builtin/logical/aws/path_roles.go +++ b/builtin/logical/aws/path_roles.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -27,11 +24,6 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "roles", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -44,18 +36,12 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameWithAtRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationSuffix: "role", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, - Description: "Name of the role", + Description: "Name of the policy", DisplayAttrs: &framework.DisplayAttributes{ - Name: "Role Name", + Name: "Policy Name", }, }, diff --git a/builtin/logical/aws/path_roles_test.go b/builtin/logical/aws/path_roles_test.go index c5bf167866cc9..39c9d90811fc4 100644 --- a/builtin/logical/aws/path_roles_test.go +++ b/builtin/logical/aws/path_roles_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -21,7 +18,7 @@ func TestBackend_PathListRoles(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend(config) + b := Backend() if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -224,7 +221,7 @@ func TestRoleCRUDWithPermissionsBoundary(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend(config) + b := Backend() if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -268,7 +265,7 @@ func TestRoleWithPermissionsBoundaryValidation(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend(config) + b := Backend() if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } diff --git a/builtin/logical/aws/path_static_creds.go b/builtin/logical/aws/path_static_creds.go deleted file mode 100644 index 119f5d0b3debd..0000000000000 --- a/builtin/logical/aws/path_static_creds.go +++ /dev/null @@ -1,99 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "net/http" - - "github.com/fatih/structs" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - pathStaticCreds = "static-creds" - - paramAccessKeyID = "access_key" - paramSecretsAccessKey = "secret_key" -) - -type awsCredentials struct { - AccessKeyID string `json:"access_key" structs:"access_key" mapstructure:"access_key"` - SecretAccessKey string `json:"secret_key" structs:"secret_key" mapstructure:"secret_key"` -} - -func pathStaticCredentials(b *backend) *framework.Path { - return &framework.Path{ - Pattern: fmt.Sprintf("%s/%s", pathStaticCreds, framework.GenericNameWithAtRegex(paramRoleName)), - Fields: map[string]*framework.FieldSchema{ - paramRoleName: { - Type: framework.TypeString, - Description: descRoleName, - }, - }, - - Operations: 
map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathStaticCredsRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: http.StatusText(http.StatusOK), - Fields: map[string]*framework.FieldSchema{ - paramAccessKeyID: { - Type: framework.TypeString, - Description: descAccessKeyID, - }, - paramSecretsAccessKey: { - Type: framework.TypeString, - Description: descSecretAccessKey, - }, - }, - }}, - }, - }, - }, - - HelpSynopsis: pathStaticCredsHelpSyn, - HelpDescription: pathStaticCredsHelpDesc, - } -} - -func (b *backend) pathStaticCredsRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - roleName, ok := data.GetOk(paramRoleName) - if !ok { - return nil, fmt.Errorf("missing %q parameter", paramRoleName) - } - - entry, err := req.Storage.Get(ctx, formatCredsStoragePath(roleName.(string))) - if err != nil { - return nil, fmt.Errorf("failed to read credentials for role %q: %w", roleName, err) - } - if entry == nil { - return nil, nil - } - - var credentials awsCredentials - if err := entry.DecodeJSON(&credentials); err != nil { - return nil, fmt.Errorf("failed to decode credentials: %w", err) - } - - return &logical.Response{ - Data: structs.New(credentials).Map(), - }, nil -} - -func formatCredsStoragePath(roleName string) string { - return fmt.Sprintf("%s/%s", pathStaticCreds, roleName) -} - -const pathStaticCredsHelpSyn = `Retrieve static credentials from the named role.` - -const pathStaticCredsHelpDesc = ` -This path reads AWS credentials for a certain static role. 
The keys are rotated -periodically according to their configuration, and will return the same password -until they are rotated.` - -const ( - descAccessKeyID = "The access key of the AWS Credential" - descSecretAccessKey = "The secret key of the AWS Credential" -) diff --git a/builtin/logical/aws/path_static_creds_test.go b/builtin/logical/aws/path_static_creds_test.go deleted file mode 100644 index c478e3f74358c..0000000000000 --- a/builtin/logical/aws/path_static_creds_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package aws - -import ( - "context" - "reflect" - "testing" - - "github.com/fatih/structs" - - "github.com/hashicorp/vault/sdk/framework" - - "github.com/hashicorp/vault/sdk/logical" -) - -// TestStaticCredsRead verifies that we can correctly read a cred that exists, and correctly _not read_ -// a cred that does not exist. -func TestStaticCredsRead(t *testing.T) { - // setup - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - bgCTX := context.Background() // for brevity later - - // insert a cred to get - creds := &awsCredentials{ - AccessKeyID: "foo", - SecretAccessKey: "bar", - } - entry, err := logical.StorageEntryJSON(formatCredsStoragePath("test"), creds) - if err != nil { - t.Fatal(err) - } - err = config.StorageView.Put(bgCTX, entry) - if err != nil { - t.Fatal(err) - } - - // cases - cases := []struct { - name string - roleName string - expectedError error - expectedResponse *logical.Response - }{ - { - name: "get existing creds", - roleName: "test", - expectedResponse: &logical.Response{ - Data: structs.New(creds).Map(), - }, - }, - { - name: "get non-existent creds", - roleName: "this-doesnt-exist", - // returns nil, nil - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - b := Backend(config) - - req := &logical.Request{ - Storage: config.StorageView, - Data: map[string]interface{}{ - "name": c.roleName, - }, - } - resp, err := b.pathStaticCredsRead(bgCTX, req, 
staticCredsFieldData(req.Data)) - - if err != c.expectedError { - t.Fatalf("got error %q, but expected %q", err, c.expectedError) - } - if !reflect.DeepEqual(resp, c.expectedResponse) { - t.Fatalf("got response %v, but expected %v", resp, c.expectedResponse) - } - }) - } -} - -func staticCredsFieldData(data map[string]interface{}) *framework.FieldData { - schema := map[string]*framework.FieldSchema{ - paramRoleName: { - Type: framework.TypeString, - Description: descRoleName, - }, - } - - return &framework.FieldData{ - Raw: data, - Schema: schema, - } -} diff --git a/builtin/logical/aws/path_static_roles.go b/builtin/logical/aws/path_static_roles.go deleted file mode 100644 index b0aa3b02c7716..0000000000000 --- a/builtin/logical/aws/path_static_roles.go +++ /dev/null @@ -1,331 +0,0 @@ -package aws - -import ( - "context" - "errors" - "fmt" - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/fatih/structs" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/queue" -) - -const ( - pathStaticRole = "static-roles" - - paramRoleName = "name" - paramUsername = "username" - paramRotationPeriod = "rotation_period" -) - -type staticRoleEntry struct { - Name string `json:"name" structs:"name" mapstructure:"name"` - ID string `json:"id" structs:"id" mapstructure:"id"` - Username string `json:"username" structs:"username" mapstructure:"username"` - RotationPeriod time.Duration `json:"rotation_period" structs:"rotation_period" mapstructure:"rotation_period"` -} - -func pathStaticRoles(b *backend) *framework.Path { - roleResponse := map[int][]framework.Response{ - http.StatusOK: {{ - Description: http.StatusText(http.StatusOK), - Fields: map[string]*framework.FieldSchema{ - paramRoleName: { - Type: framework.TypeString, - Description: descRoleName, - }, - paramUsername: { - Type: framework.TypeString, - Description: descUsername, - }, - 
paramRotationPeriod: { - Type: framework.TypeDurationSecond, - Description: descRotationPeriod, - }, - }, - }}, - } - - return &framework.Path{ - Pattern: fmt.Sprintf("%s/%s", pathStaticRole, framework.GenericNameWithAtRegex(paramRoleName)), - Fields: map[string]*framework.FieldSchema{ - paramRoleName: { - Type: framework.TypeString, - Description: descRoleName, - }, - paramUsername: { - Type: framework.TypeString, - Description: descUsername, - }, - paramRotationPeriod: { - Type: framework.TypeDurationSecond, - Description: descRotationPeriod, - }, - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathStaticRolesRead, - Responses: roleResponse, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathStaticRolesWrite, - ForwardPerformanceSecondary: true, - ForwardPerformanceStandby: true, - Responses: roleResponse, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathStaticRolesDelete, - ForwardPerformanceSecondary: true, - ForwardPerformanceStandby: true, - Responses: map[int][]framework.Response{ - http.StatusNoContent: {{ - Description: http.StatusText(http.StatusNoContent), - }}, - }, - }, - }, - - HelpSynopsis: pathStaticRolesHelpSyn, - HelpDescription: pathStaticRolesHelpDesc, - } -} - -func (b *backend) pathStaticRolesRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - roleName, ok := data.GetOk(paramRoleName) - if !ok { - return nil, fmt.Errorf("missing %q parameter", paramRoleName) - } - - b.roleMutex.RLock() - defer b.roleMutex.RUnlock() - - entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string))) - if err != nil { - return nil, fmt.Errorf("failed to read configuration for static role %q: %w", roleName, err) - } - if entry == nil { - return nil, nil - } - - var config staticRoleEntry - if err := entry.DecodeJSON(&config); err != nil { - return nil, 
fmt.Errorf("failed to decode configuration for static role %q: %w", roleName, err) - } - - return &logical.Response{ - Data: formatResponse(config), - }, nil -} - -func (b *backend) pathStaticRolesWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - // Create & validate config from request parameters - config := staticRoleEntry{} - isCreate := req.Operation == logical.CreateOperation - - if rawRoleName, ok := data.GetOk(paramRoleName); ok { - config.Name = rawRoleName.(string) - - if err := b.validateRoleName(config.Name); err != nil { - return nil, err - } - } else { - return logical.ErrorResponse("missing %q parameter", paramRoleName), nil - } - - // retrieve old role value - entry, err := req.Storage.Get(ctx, formatRoleStoragePath(config.Name)) - if err != nil { - return nil, fmt.Errorf("couldn't check storage for pre-existing role: %w", err) - } - - if entry != nil { - err = entry.DecodeJSON(&config) - if err != nil { - return nil, fmt.Errorf("couldn't convert existing role into config struct: %w", err) - } - } else { - // if we couldn't find an entry, this is a create event - isCreate = true - } - - // other params are optional if we're not Creating - - if rawUsername, ok := data.GetOk(paramUsername); ok { - config.Username = rawUsername.(string) - - if err := b.validateIAMUserExists(ctx, req.Storage, &config, isCreate); err != nil { - return nil, err - } - } else if isCreate { - return logical.ErrorResponse("missing %q parameter", paramUsername), nil - } - - if rawRotationPeriod, ok := data.GetOk(paramRotationPeriod); ok { - config.RotationPeriod = time.Duration(rawRotationPeriod.(int)) * time.Second - - if err := b.validateRotationPeriod(config.RotationPeriod); err != nil { - return nil, err - } - } else if isCreate { - return logical.ErrorResponse("missing %q parameter", paramRotationPeriod), nil - } - - b.roleMutex.Lock() - defer b.roleMutex.Unlock() - - // Upsert role config - newRole, err := 
logical.StorageEntryJSON(formatRoleStoragePath(config.Name), config) - if err != nil { - return nil, fmt.Errorf("failed to marshal object to JSON: %w", err) - } - err = req.Storage.Put(ctx, newRole) - if err != nil { - return nil, fmt.Errorf("failed to save object in storage: %w", err) - } - - // Bootstrap initial set of keys if they did not exist before. AWS Secret Access Keys can only be obtained on creation, - // so we need to boostrap new roles with a new initial set of keys to be able to serve valid credentials to Vault clients. - existingCreds, err := req.Storage.Get(ctx, formatCredsStoragePath(config.Name)) - if err != nil { - return nil, fmt.Errorf("unable to verify if credentials already exist for role %q: %w", config.Name, err) - } - if existingCreds == nil { - err := b.createCredential(ctx, req.Storage, config, false) - if err != nil { - return nil, fmt.Errorf("failed to create new credentials for role %q: %w", config.Name, err) - } - - err = b.credRotationQueue.Push(&queue.Item{ - Key: config.Name, - Value: config, - Priority: time.Now().Add(config.RotationPeriod).Unix(), - }) - if err != nil { - return nil, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", config.Name, err) - } - } - - return &logical.Response{ - Data: formatResponse(config), - }, nil -} - -func (b *backend) pathStaticRolesDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - roleName, ok := data.GetOk(paramRoleName) - if !ok { - return nil, fmt.Errorf("missing %q parameter", paramRoleName) - } - - b.roleMutex.Lock() - defer b.roleMutex.Unlock() - - entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string))) - if err != nil { - return nil, fmt.Errorf("couldn't locate role in storage due to error: %w", err) - } - // no entry in storage, but no error either, congrats, it's deleted! 
- if entry == nil { - return nil, nil - } - var cfg staticRoleEntry - err = entry.DecodeJSON(&cfg) - if err != nil { - return nil, fmt.Errorf("couldn't convert storage entry to role config") - } - - err = b.deleteCredential(ctx, req.Storage, cfg, false) - if err != nil { - return nil, fmt.Errorf("failed to clean credentials while deleting role %q: %w", roleName.(string), err) - } - - // delete from the queue - _, err = b.credRotationQueue.PopByKey(cfg.Name) - if err != nil { - return nil, fmt.Errorf("couldn't delete key from queue: %w", err) - } - - return nil, req.Storage.Delete(ctx, formatRoleStoragePath(roleName.(string))) -} - -func (b *backend) validateRoleName(name string) error { - if name == "" { - return errors.New("empty role name attribute given") - } - return nil -} - -// validateIAMUser checks the user information we have for the role against the information on AWS. On a create, it uses the username -// to retrieve the user information and _sets_ the userID. On update, it validates the userID and username. 
-func (b *backend) validateIAMUserExists(ctx context.Context, storage logical.Storage, entry *staticRoleEntry, isCreate bool) error { - c, err := b.clientIAM(ctx, storage) - if err != nil { - return fmt.Errorf("unable to validate username %q: %w", entry.Username, err) - } - - // we don't really care about the content of the result, just that it's not an error - out, err := c.GetUser(&iam.GetUserInput{ - UserName: aws.String(entry.Username), - }) - if err != nil || out.User == nil { - return fmt.Errorf("unable to validate username %q: %w", entry.Username, err) - } - if *out.User.UserName != entry.Username { - return fmt.Errorf("AWS GetUser returned a username, but it didn't match: %q was requested, but %q was returned", entry.Username, *out.User.UserName) - } - - if !isCreate && *out.User.UserId != entry.ID { - return fmt.Errorf("AWS GetUser returned a user, but the ID did not match: %q was requested, but %q was returned", entry.ID, *out.User.UserId) - } else { - // if this is an insert, store the userID. This is the immutable part of an IAM user, but it's not exactly user-friendly. - // So, we allow users to specify usernames, but on updates we'll use the ID as a verification cross-check. - entry.ID = *out.User.UserId - } - - return nil -} - -const ( - minAllowableRotationPeriod = 1 * time.Minute -) - -func (b *backend) validateRotationPeriod(period time.Duration) error { - if period < minAllowableRotationPeriod { - return fmt.Errorf("role rotation period out of range: must be greater than %.2f seconds", minAllowableRotationPeriod.Seconds()) - } - return nil -} - -func formatResponse(cfg staticRoleEntry) map[string]interface{} { - response := structs.New(cfg).Map() - response[paramRotationPeriod] = int64(cfg.RotationPeriod.Seconds()) - - return response -} - -func formatRoleStoragePath(roleName string) string { - return fmt.Sprintf("%s/%s", pathStaticRole, roleName) -} - -const pathStaticRolesHelpSyn = ` -Manage static roles for AWS. 
-` - -const pathStaticRolesHelpDesc = ` -This path lets you manage static roles (users) for the AWS secret backend. -A static role is associated with a single IAM user, and manages the access -keys based on a rotation period, automatically rotating the credential. If -the IAM user has multiple access keys, the oldest key will be rotated. -` - -const ( - descRoleName = "The name of this role." - descUsername = "The IAM user to adopt as a static role." - descRotationPeriod = `Period by which to rotate the backing credential of the adopted user. -This can be a Go duration (e.g, '1m', 24h'), or an integer number of seconds.` -) diff --git a/builtin/logical/aws/path_static_roles_test.go b/builtin/logical/aws/path_static_roles_test.go deleted file mode 100644 index 205b42cd00978..0000000000000 --- a/builtin/logical/aws/path_static_roles_test.go +++ /dev/null @@ -1,490 +0,0 @@ -package aws - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/hashicorp/vault/sdk/queue" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/go-secure-stdlib/awsutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -// TestStaticRolesValidation verifies that valid requests pass validation and that invalid requests fail validation. -// This includes the user already existing in IAM roles, and the rotation period being sufficiently long. 
-func TestStaticRolesValidation(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - bgCTX := context.Background() // for brevity - - cases := []struct { - name string - opts []awsutil.MockIAMOption - requestData map[string]interface{} - isError bool - }{ - { - name: "all good", - opts: []awsutil.MockIAMOption{ - awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), - awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ - AccessKey: &iam.AccessKey{ - AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), - SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), - UserName: aws.String("jane-doe"), - }, - }), - awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ - AccessKeyMetadata: []*iam.AccessKeyMetadata{}, - IsTruncated: aws.Bool(false), - }), - }, - requestData: map[string]interface{}{ - "name": "test", - "username": "jane-doe", - "rotation_period": "1d", - }, - }, - { - name: "bad user", - opts: []awsutil.MockIAMOption{ - awsutil.WithGetUserError(errors.New("oh no")), - }, - requestData: map[string]interface{}{ - "name": "test", - "username": "jane-doe", - "rotation_period": "24h", - }, - isError: true, - }, - { - name: "user mismatch", - opts: []awsutil.MockIAMOption{ - awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("ms-impostor"), UserId: aws.String("fake-id")}}), - }, - requestData: map[string]interface{}{ - "name": "test", - "username": "jane-doe", - "rotation_period": "1d2h", - }, - isError: true, - }, - { - name: "bad rotation period", - opts: []awsutil.MockIAMOption{ - awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), - }, - requestData: map[string]interface{}{ - "name": "test", - "username": "jane-doe", - "rotation_period": "45s", - }, - isError: true, - }, - } - - for _, c := range cases { - 
t.Run(c.name, func(t *testing.T) { - b := Backend(config) - miam, err := awsutil.NewMockIAM(c.opts...)(nil) - if err != nil { - t.Fatal(err) - } - b.iamClient = miam - if err := b.Setup(bgCTX, config); err != nil { - t.Fatal(err) - } - req := &logical.Request{ - Operation: logical.UpdateOperation, - Storage: config.StorageView, - Data: c.requestData, - Path: "static-roles/test", - } - _, err = b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data)) - if c.isError && err == nil { - t.Fatal("expected an error but didn't get one") - } else if !c.isError && err != nil { - t.Fatalf("got an unexpected error: %s", err) - } - }) - } -} - -// TestStaticRolesWrite validates that we can write a new entry for a new static role, and that we correctly -// do not write if the request is invalid in some way. -func TestStaticRolesWrite(t *testing.T) { - bgCTX := context.Background() - - cases := []struct { - name string - opts []awsutil.MockIAMOption - data map[string]interface{} - expectedError bool - findUser bool - isUpdate bool - }{ - { - name: "happy path", - opts: []awsutil.MockIAMOption{ - awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), - awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ - AccessKeyMetadata: []*iam.AccessKeyMetadata{}, - IsTruncated: aws.Bool(false), - }), - awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ - AccessKey: &iam.AccessKey{ - AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), - SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), - UserName: aws.String("jane-doe"), - }, - }), - }, - data: map[string]interface{}{ - "name": "test", - "username": "jane-doe", - "rotation_period": "1d", - }, - // writes role, writes cred - findUser: true, - }, - { - name: "no aws user", - opts: []awsutil.MockIAMOption{ - awsutil.WithGetUserError(errors.New("no such user, etc etc")), - }, - data: map[string]interface{}{ - "name": "test", - 
"username": "a-nony-mous", - "rotation_period": "15s", - }, - expectedError: true, - }, - { - name: "update existing user", - opts: []awsutil.MockIAMOption{ - awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("john-doe"), UserId: aws.String("unique-id")}}), - awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ - AccessKeyMetadata: []*iam.AccessKeyMetadata{}, - IsTruncated: aws.Bool(false), - }), - awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ - AccessKey: &iam.AccessKey{ - AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), - SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), - UserName: aws.String("john-doe"), - }, - }), - }, - data: map[string]interface{}{ - "name": "johnny", - "rotation_period": "19m", - }, - findUser: true, - isUpdate: true, - }, - } - - // if a user exists (user doesn't exist is tested in validation) - // we'll check how many keys the user has - if it's two, we delete one. - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - - miam, err := awsutil.NewMockIAM( - c.opts..., - )(nil) - if err != nil { - t.Fatal(err) - } - - b := Backend(config) - b.iamClient = miam - if err := b.Setup(bgCTX, config); err != nil { - t.Fatal(err) - } - - // put a role in storage for update tests - staticRole := staticRoleEntry{ - Name: "johnny", - Username: "john-doe", - ID: "unique-id", - RotationPeriod: 24 * time.Hour, - } - entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole) - if err != nil { - t.Fatal(err) - } - err = config.StorageView.Put(bgCTX, entry) - if err != nil { - t.Fatal(err) - } - - req := &logical.Request{ - Operation: logical.UpdateOperation, - Storage: config.StorageView, - Data: c.data, - Path: "static-roles/" + c.data["name"].(string), - } - - r, err := b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data)) - if c.expectedError 
&& err == nil { - t.Fatal(err) - } else if c.expectedError { - return // save us some if statements - } - - if err != nil { - t.Fatalf("got an error back unexpectedly: %s", err) - } - - if c.findUser && r == nil { - t.Fatal("response was nil, but it shouldn't have been") - } - - role, err := config.StorageView.Get(bgCTX, req.Path) - if c.findUser && (err != nil || role == nil) { - t.Fatalf("couldn't find the role we should have stored: %s", err) - } - var actualData staticRoleEntry - err = role.DecodeJSON(&actualData) - if err != nil { - t.Fatalf("couldn't convert storage data to role entry: %s", err) - } - - // construct expected data - var expectedData staticRoleEntry - fieldData := staticRoleFieldData(c.data) - if c.isUpdate { - // data is johnny + c.data - expectedData = staticRole - } - - if u, ok := fieldData.GetOk("username"); ok { - expectedData.Username = u.(string) - } - if r, ok := fieldData.GetOk("rotation_period"); ok { - expectedData.RotationPeriod = time.Duration(r.(int)) * time.Second - } - if n, ok := fieldData.GetOk("name"); ok { - expectedData.Name = n.(string) - } - - // validate fields - if eu, au := expectedData.Username, actualData.Username; eu != au { - t.Fatalf("mismatched username, expected %q but got %q", eu, au) - } - if er, ar := expectedData.RotationPeriod, actualData.RotationPeriod; er != ar { - t.Fatalf("mismatched rotation period, expected %q but got %q", er, ar) - } - if en, an := expectedData.Name, actualData.Name; en != an { - t.Fatalf("mismatched role name, expected %q, but got %q", en, an) - } - }) - } -} - -// TestStaticRoleRead validates that we can read a configured role and correctly do not read anything if we -// request something that doesn't exist. 
-func TestStaticRoleRead(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - bgCTX := context.Background() - - // test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe" - cases := []struct { - name string - roleName string - found bool - }{ - { - name: "role name exists", - roleName: "test", - found: true, - }, - { - name: "role name not found", - roleName: "toast", - found: false, // implied, but set for clarity - }, - } - - staticRole := staticRoleEntry{ - Name: "test", - Username: "jane-doe", - RotationPeriod: 24 * time.Hour, - } - entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole) - if err != nil { - t.Fatal(err) - } - err = config.StorageView.Put(bgCTX, entry) - if err != nil { - t.Fatal(err) - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - req := &logical.Request{ - Operation: logical.ReadOperation, - Storage: config.StorageView, - Data: map[string]interface{}{ - "name": c.roleName, - }, - Path: formatRoleStoragePath(c.roleName), - } - - b := Backend(config) - - r, err := b.pathStaticRolesRead(bgCTX, req, staticRoleFieldData(req.Data)) - if err != nil { - t.Fatal(err) - } - if c.found { - if r == nil { - t.Fatal("response was nil, but it shouldn't have been") - } - } else { - if r != nil { - t.Fatal("response should have been nil on a non-existent role") - } - } - }) - } -} - -// TestStaticRoleDelete validates that we correctly remove a role on a delete request, and that we correctly do not -// remove anything if a role does not exist with that name. 
-func TestStaticRoleDelete(t *testing.T) { - bgCTX := context.Background() - - // test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe" - cases := []struct { - name string - role string - found bool - }{ - { - name: "role found", - role: "test", - found: true, - }, - { - name: "role not found", - role: "tossed", - found: false, - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - - // fake an IAM - var iamfunc awsutil.IAMAPIFunc - if !c.found { - iamfunc = awsutil.NewMockIAM(awsutil.WithDeleteAccessKeyError(errors.New("shouldn't have called delete"))) - } else { - iamfunc = awsutil.NewMockIAM() - } - miam, err := iamfunc(nil) - if err != nil { - t.Fatalf("couldn't initialize mockiam: %s", err) - } - - b := Backend(config) - b.iamClient = miam - - // put in storage - staticRole := staticRoleEntry{ - Name: "test", - Username: "jane-doe", - RotationPeriod: 24 * time.Hour, - } - entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole) - if err != nil { - t.Fatal(err) - } - err = config.StorageView.Put(bgCTX, entry) - if err != nil { - t.Fatal(err) - } - - l, err := config.StorageView.List(bgCTX, "") - if err != nil || len(l) != 1 { - t.Fatalf("couldn't add an entry to storage during test setup: %s", err) - } - - // put in queue - err = b.credRotationQueue.Push(&queue.Item{ - Key: staticRole.Name, - Value: staticRole, - Priority: time.Now().Add(90 * time.Hour).Unix(), - }) - if err != nil { - t.Fatalf("couldn't add items to pq") - } - - req := &logical.Request{ - Operation: logical.ReadOperation, - Storage: config.StorageView, - Data: map[string]interface{}{ - "name": c.role, - }, - Path: formatRoleStoragePath(c.role), - } - - r, err := b.pathStaticRolesDelete(bgCTX, req, staticRoleFieldData(req.Data)) - if err != nil { - t.Fatal(err) - } - if r != nil { - 
t.Fatal("response wasn't nil, but it should have been") - } - - l, err = config.StorageView.List(bgCTX, "") - if err != nil { - t.Fatal(err) - } - if c.found && len(l) != 0 { - t.Fatal("size of role storage is non zero after delete") - } else if !c.found && len(l) != 1 { - t.Fatal("size of role storage changed after what should have been no deletion") - } - - if c.found && b.credRotationQueue.Len() != 0 { - t.Fatal("size of queue is non-zero after delete") - } else if !c.found && b.credRotationQueue.Len() != 1 { - t.Fatal("size of queue changed after what should have been no deletion") - } - }) - } -} - -func staticRoleFieldData(data map[string]interface{}) *framework.FieldData { - schema := map[string]*framework.FieldSchema{ - paramRoleName: { - Type: framework.TypeString, - Description: descRoleName, - }, - paramUsername: { - Type: framework.TypeString, - Description: descUsername, - }, - paramRotationPeriod: { - Type: framework.TypeDurationSecond, - Description: descRotationPeriod, - }, - } - - return &framework.FieldData{ - Raw: data, - Schema: schema, - } -} diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go index f368365c60247..035350cdbfb1f 100644 --- a/builtin/logical/aws/path_user.go +++ b/builtin/logical/aws/path_user.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -21,12 +18,6 @@ import ( func pathUser(b *backend) *framework.Path { return &framework.Path{ Pattern: "(creds|sts)/" + framework.GenericNameWithAtRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixAWS, - OperationVerb: "generate", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -47,19 +38,9 @@ func pathUser(b *backend) *framework.Path { }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathCredsRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "credentials|sts-credentials", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathCredsRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "credentials-with-parameters|sts-credentials-with-parameters", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathCredsRead, + logical.UpdateOperation: b.pathCredsRead, }, HelpSynopsis: pathUserHelpSyn, @@ -171,7 +152,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k } // Get information about this user - groupsResp, err := client.ListGroupsForUserWithContext(ctx, &iam.ListGroupsForUserInput{ + groupsResp, err := client.ListGroupsForUser(&iam.ListGroupsForUserInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -210,7 +191,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k groups := groupsResp.Groups // Inline (user) policies - policiesResp, err := client.ListUserPoliciesWithContext(ctx, &iam.ListUserPoliciesInput{ + policiesResp, err := client.ListUserPolicies(&iam.ListUserPoliciesInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -220,7 +201,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k 
policies := policiesResp.PolicyNames // Attached managed policies - manPoliciesResp, err := client.ListAttachedUserPoliciesWithContext(ctx, &iam.ListAttachedUserPoliciesInput{ + manPoliciesResp, err := client.ListAttachedUserPolicies(&iam.ListAttachedUserPoliciesInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -229,7 +210,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k } manPolicies := manPoliciesResp.AttachedPolicies - keysResp, err := client.ListAccessKeysWithContext(ctx, &iam.ListAccessKeysInput{ + keysResp, err := client.ListAccessKeys(&iam.ListAccessKeysInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -240,7 +221,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Revoke all keys for _, k := range keys { - _, err = client.DeleteAccessKeyWithContext(ctx, &iam.DeleteAccessKeyInput{ + _, err = client.DeleteAccessKey(&iam.DeleteAccessKeyInput{ AccessKeyId: k.AccessKeyId, UserName: aws.String(username), }) @@ -251,7 +232,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Detach managed policies for _, p := range manPolicies { - _, err = client.DetachUserPolicyWithContext(ctx, &iam.DetachUserPolicyInput{ + _, err = client.DetachUserPolicy(&iam.DetachUserPolicyInput{ UserName: aws.String(username), PolicyArn: p.PolicyArn, }) @@ -262,7 +243,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Delete any inline (user) policies for _, p := range policies { - _, err = client.DeleteUserPolicyWithContext(ctx, &iam.DeleteUserPolicyInput{ + _, err = client.DeleteUserPolicy(&iam.DeleteUserPolicyInput{ UserName: aws.String(username), PolicyName: p, }) @@ -273,7 +254,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Remove the user from all their groups for _, g := range groups { - _, err = client.RemoveUserFromGroupWithContext(ctx, 
&iam.RemoveUserFromGroupInput{ + _, err = client.RemoveUserFromGroup(&iam.RemoveUserFromGroupInput{ GroupName: g.GroupName, UserName: aws.String(username), }) @@ -283,7 +264,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k } // Delete the user - _, err = client.DeleteUserWithContext(ctx, &iam.DeleteUserInput{ + _, err = client.DeleteUser(&iam.DeleteUserInput{ UserName: aws.String(username), }) if err != nil { diff --git a/builtin/logical/aws/rollback.go b/builtin/logical/aws/rollback.go index 847ecd1c258b7..e498fc6b2bafd 100644 --- a/builtin/logical/aws/rollback.go +++ b/builtin/logical/aws/rollback.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package aws import ( diff --git a/builtin/logical/aws/rotation.go b/builtin/logical/aws/rotation.go deleted file mode 100644 index 4461437624656..0000000000000 --- a/builtin/logical/aws/rotation.go +++ /dev/null @@ -1,188 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/queue" -) - -// rotateExpiredStaticCreds will pop expired credentials (credentials whose priority -// represents a time before the present), rotate the associated credential, and push -// them back onto the queue with the new priority. -func (b *backend) rotateExpiredStaticCreds(ctx context.Context, req *logical.Request) error { - var errs *multierror.Error - - for { - keepGoing, err := b.rotateCredential(ctx, req.Storage) - if err != nil { - errs = multierror.Append(errs, err) - } - if !keepGoing { - if errs.ErrorOrNil() != nil { - return fmt.Errorf("error(s) occurred while rotating expired static credentials: %w", errs) - } else { - return nil - } - } - } -} - -// rotateCredential pops an element from the priority queue, and if it is expired, rotate and re-push. 
-// If a cred was rotated, it returns true, otherwise false. -func (b *backend) rotateCredential(ctx context.Context, storage logical.Storage) (rotated bool, err error) { - // If queue is empty or first item does not need a rotation (priority is next rotation timestamp) there is nothing to do - item, err := b.credRotationQueue.Pop() - if err != nil { - // the queue is just empty, which is fine. - if err == queue.ErrEmpty { - return false, nil - } - return false, fmt.Errorf("failed to pop from queue for role %q: %w", item.Key, err) - } - if item.Priority > time.Now().Unix() { - // no rotation required - // push the item back into priority queue - err = b.credRotationQueue.Push(item) - if err != nil { - return false, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", item.Key, err) - } - return false, nil - } - - cfg := item.Value.(staticRoleEntry) - - err = b.createCredential(ctx, storage, cfg, true) - if err != nil { - return false, err - } - - // set new priority and re-queue - item.Priority = time.Now().Add(cfg.RotationPeriod).Unix() - err = b.credRotationQueue.Push(item) - if err != nil { - return false, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", cfg.Name, err) - } - - return true, nil -} - -// createCredential will create a new iam credential, deleting the oldest one if necessary. -func (b *backend) createCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error { - iamClient, err := b.clientIAM(ctx, storage) - if err != nil { - return fmt.Errorf("unable to get the AWS IAM client: %w", err) - } - - // IAM users can have a most 2 sets of keys at a time. - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) - // Ideally we would get this value through an api check, but I'm not sure one exists. 
- const maxAllowedKeys = 2 - - err = b.validateIAMUserExists(ctx, storage, &cfg, false) - if err != nil { - return fmt.Errorf("iam user didn't exist, or username/userid didn't match: %w", err) - } - - accessKeys, err := iamClient.ListAccessKeys(&iam.ListAccessKeysInput{ - UserName: aws.String(cfg.Username), - }) - if err != nil { - return fmt.Errorf("unable to list existing access keys for IAM user %q: %w", cfg.Username, err) - } - - // If we have the maximum number of keys, we have to delete one to make another (so we can get the credentials). - // We'll delete the oldest one. - // - // Since this check relies on a pre-coded maximum, it's a bit fragile. If the number goes up, we risk deleting - // a key when we didn't need to. If this number goes down, we'll start throwing errors because we think we're - // allowed to create a key and aren't. In either case, adjusting the constant should be sufficient to fix things. - if len(accessKeys.AccessKeyMetadata) >= maxAllowedKeys { - oldestKey := accessKeys.AccessKeyMetadata[0] - - for i := 1; i < len(accessKeys.AccessKeyMetadata); i++ { - if accessKeys.AccessKeyMetadata[i].CreateDate.Before(*oldestKey.CreateDate) { - oldestKey = accessKeys.AccessKeyMetadata[i] - } - } - - _, err := iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{ - AccessKeyId: oldestKey.AccessKeyId, - UserName: oldestKey.UserName, - }) - if err != nil { - return fmt.Errorf("unable to delete oldest access keys for user %q: %w", cfg.Username, err) - } - } - - // Create new set of keys - out, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{ - UserName: aws.String(cfg.Username), - }) - if err != nil { - return fmt.Errorf("unable to create new access keys for user %q: %w", cfg.Username, err) - } - - // Persist new keys - entry, err := logical.StorageEntryJSON(formatCredsStoragePath(cfg.Name), &awsCredentials{ - AccessKeyID: *out.AccessKey.AccessKeyId, - SecretAccessKey: *out.AccessKey.SecretAccessKey, - }) - if err != nil { - return 
fmt.Errorf("failed to marshal object to JSON: %w", err) - } - if shouldLockStorage { - b.roleMutex.Lock() - defer b.roleMutex.Unlock() - } - err = storage.Put(ctx, entry) - if err != nil { - return fmt.Errorf("failed to save object in storage: %w", err) - } - - return nil -} - -// delete credential will remove the credential associated with the role from storage. -func (b *backend) deleteCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error { - // synchronize storage access if we didn't in the caller. - if shouldLockStorage { - b.roleMutex.Lock() - defer b.roleMutex.Unlock() - } - - key, err := storage.Get(ctx, formatCredsStoragePath(cfg.Name)) - if err != nil { - return fmt.Errorf("couldn't find key in storage: %w", err) - } - // no entry, so i guess we deleted it already - if key == nil { - return nil - } - var creds awsCredentials - err = key.DecodeJSON(&creds) - if err != nil { - return fmt.Errorf("couldn't decode storage entry to a valid credential: %w", err) - } - - err = storage.Delete(ctx, formatCredsStoragePath(cfg.Name)) - if err != nil { - return fmt.Errorf("couldn't delete from storage: %w", err) - } - - // because we have the information, this is the one we created, so it's safe for us to delete. 
- _, err = b.iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{ - AccessKeyId: aws.String(creds.AccessKeyID), - UserName: aws.String(cfg.Username), - }) - if err != nil { - return fmt.Errorf("couldn't delete from IAM: %w", err) - } - - return nil -} diff --git a/builtin/logical/aws/rotation_test.go b/builtin/logical/aws/rotation_test.go deleted file mode 100644 index 8f672efc69196..0000000000000 --- a/builtin/logical/aws/rotation_test.go +++ /dev/null @@ -1,348 +0,0 @@ -package aws - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/aws/aws-sdk-go/service/iam/iamiface" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/go-secure-stdlib/awsutil" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/queue" -) - -// TestRotation verifies that the rotation code and priority queue correctly selects and rotates credentials -// for static secrets. -func TestRotation(t *testing.T) { - bgCTX := context.Background() - - type credToInsert struct { - config staticRoleEntry // role configuration from a normal createRole request - age time.Duration // how old the cred should be - if this is longer than the config.RotationPeriod, - // the cred is 'pre-expired' - - changed bool // whether we expect the cred to change - this is technically redundant to a comparison between - // rotationPeriod and age. 
- } - - // due to a limitation with the mockIAM implementation, any cred you want to rotate must have - // username jane-doe and userid unique-id, since we can only pre-can one exact response to GetUser - cases := []struct { - name string - creds []credToInsert - }{ - { - name: "refresh one", - creds: []credToInsert{ - { - config: staticRoleEntry{ - Name: "test", - Username: "jane-doe", - ID: "unique-id", - RotationPeriod: 2 * time.Second, - }, - age: 5 * time.Second, - changed: true, - }, - }, - }, - { - name: "refresh none", - creds: []credToInsert{ - { - config: staticRoleEntry{ - Name: "test", - Username: "jane-doe", - ID: "unique-id", - RotationPeriod: 1 * time.Minute, - }, - age: 5 * time.Second, - changed: false, - }, - }, - }, - { - name: "refresh one of two", - creds: []credToInsert{ - { - config: staticRoleEntry{ - Name: "toast", - Username: "john-doe", - ID: "other-id", - RotationPeriod: 1 * time.Minute, - }, - age: 5 * time.Second, - changed: false, - }, - { - config: staticRoleEntry{ - Name: "test", - Username: "jane-doe", - ID: "unique-id", - RotationPeriod: 1 * time.Second, - }, - age: 5 * time.Second, - changed: true, - }, - }, - }, - { - name: "no creds to rotate", - creds: []credToInsert{}, - }, - } - - ak := "long-access-key-id" - oldSecret := "abcdefghijklmnopqrstuvwxyz" - newSecret := "zyxwvutsrqponmlkjihgfedcba" - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - - b := Backend(config) - - // insert all our creds - for i, cred := range c.creds { - - // all the creds will be the same for every user, but that's okay - // since what we care about is whether they changed on a single-user basis. 
- miam, err := awsutil.NewMockIAM( - // blank list for existing user - awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ - AccessKeyMetadata: []*iam.AccessKeyMetadata{ - {}, - }, - }), - // initial key to store - awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ - AccessKey: &iam.AccessKey{ - AccessKeyId: aws.String(ak), - SecretAccessKey: aws.String(oldSecret), - }, - }), - awsutil.WithGetUserOutput(&iam.GetUserOutput{ - User: &iam.User{ - UserId: aws.String(cred.config.ID), - UserName: aws.String(cred.config.Username), - }, - }), - )(nil) - if err != nil { - t.Fatalf("couldn't initialze mock IAM handler: %s", err) - } - b.iamClient = miam - - err = b.createCredential(bgCTX, config.StorageView, cred.config, true) - if err != nil { - t.Fatalf("couldn't insert credential %d: %s", i, err) - } - - item := &queue.Item{ - Key: cred.config.Name, - Value: cred.config, - Priority: time.Now().Add(-1 * cred.age).Add(cred.config.RotationPeriod).Unix(), - } - err = b.credRotationQueue.Push(item) - if err != nil { - t.Fatalf("couldn't push item onto queue: %s", err) - } - } - - // update aws responses, same argument for why it's okay every cred will be the same - miam, err := awsutil.NewMockIAM( - // old key - awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ - AccessKeyMetadata: []*iam.AccessKeyMetadata{ - { - AccessKeyId: aws.String(ak), - }, - }, - }), - // new key - awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ - AccessKey: &iam.AccessKey{ - AccessKeyId: aws.String(ak), - SecretAccessKey: aws.String(newSecret), - }, - }), - awsutil.WithGetUserOutput(&iam.GetUserOutput{ - User: &iam.User{ - UserId: aws.String("unique-id"), - UserName: aws.String("jane-doe"), - }, - }), - )(nil) - if err != nil { - t.Fatalf("couldn't initialze mock IAM handler: %s", err) - } - b.iamClient = miam - - req := &logical.Request{ - Storage: config.StorageView, - } - err = b.rotateExpiredStaticCreds(bgCTX, req) - if err != nil { - t.Fatalf("got an 
error rotating credentials: %s", err) - } - - // check our credentials - for i, cred := range c.creds { - entry, err := config.StorageView.Get(bgCTX, formatCredsStoragePath(cred.config.Name)) - if err != nil { - t.Fatalf("got an error retrieving credentials %d", i) - } - var out awsCredentials - err = entry.DecodeJSON(&out) - if err != nil { - t.Fatalf("could not unmarshal storage view entry for cred %d to an aws credential: %s", i, err) - } - - if cred.changed && out.SecretAccessKey != newSecret { - t.Fatalf("expected the key for cred %d to have changed, but it hasn't", i) - } else if !cred.changed && out.SecretAccessKey != oldSecret { - t.Fatalf("expected the key for cred %d to have stayed the same, but it changed", i) - } - } - }) - } -} - -type fakeIAM struct { - iamiface.IAMAPI - delReqs []*iam.DeleteAccessKeyInput -} - -func (f *fakeIAM) DeleteAccessKey(r *iam.DeleteAccessKeyInput) (*iam.DeleteAccessKeyOutput, error) { - f.delReqs = append(f.delReqs, r) - return f.IAMAPI.DeleteAccessKey(r) -} - -// TestCreateCredential verifies that credential creation firstly only deletes credentials if it needs to (i.e., two -// or more credentials on IAM), and secondly correctly deletes the oldest one. 
-func TestCreateCredential(t *testing.T) { - cases := []struct { - name string - username string - id string - deletedKey string - opts []awsutil.MockIAMOption - }{ - { - name: "zero keys", - username: "jane-doe", - id: "unique-id", - opts: []awsutil.MockIAMOption{ - awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ - AccessKeyMetadata: []*iam.AccessKeyMetadata{}, - }), - // delete should _not_ be called - awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")), - awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ - AccessKey: &iam.AccessKey{ - AccessKeyId: aws.String("key"), - SecretAccessKey: aws.String("itsasecret"), - }, - }), - awsutil.WithGetUserOutput(&iam.GetUserOutput{ - User: &iam.User{ - UserId: aws.String("unique-id"), - UserName: aws.String("jane-doe"), - }, - }), - }, - }, - { - name: "one key", - username: "jane-doe", - id: "unique-id", - opts: []awsutil.MockIAMOption{ - awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ - AccessKeyMetadata: []*iam.AccessKeyMetadata{ - {AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Now())}, - }, - }), - // delete should _not_ be called - awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")), - awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ - AccessKey: &iam.AccessKey{ - AccessKeyId: aws.String("key"), - SecretAccessKey: aws.String("itsasecret"), - }, - }), - awsutil.WithGetUserOutput(&iam.GetUserOutput{ - User: &iam.User{ - UserId: aws.String("unique-id"), - UserName: aws.String("jane-doe"), - }, - }), - }, - }, - { - name: "two keys", - username: "jane-doe", - id: "unique-id", - deletedKey: "foo", - opts: []awsutil.MockIAMOption{ - awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ - AccessKeyMetadata: []*iam.AccessKeyMetadata{ - {AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Time{})}, - {AccessKeyId: aws.String("bar"), CreateDate: aws.Time(time.Now())}, - }, - }), - 
awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ - AccessKey: &iam.AccessKey{ - AccessKeyId: aws.String("key"), - SecretAccessKey: aws.String("itsasecret"), - }, - }), - awsutil.WithGetUserOutput(&iam.GetUserOutput{ - User: &iam.User{ - UserId: aws.String("unique-id"), - UserName: aws.String("jane-doe"), - }, - }), - }, - }, - } - - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - miam, err := awsutil.NewMockIAM( - c.opts..., - )(nil) - if err != nil { - t.Fatal(err) - } - fiam := &fakeIAM{ - IAMAPI: miam, - } - - b := Backend(config) - b.iamClient = fiam - - err = b.createCredential(context.Background(), config.StorageView, staticRoleEntry{Username: c.username, ID: c.id}, true) - if err != nil { - t.Fatalf("got an error we didn't expect: %q", err) - } - - if c.deletedKey != "" { - if len(fiam.delReqs) != 1 { - t.Fatalf("called the wrong number of deletes (called %d deletes)", len(fiam.delReqs)) - } - actualKey := *fiam.delReqs[0].AccessKeyId - if c.deletedKey != actualKey { - t.Fatalf("we deleted the wrong key: %q instead of %q", actualKey, c.deletedKey) - } - } - }) - } -} diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go index 2f1ac442bcbf5..c70386d681464 100644 --- a/builtin/logical/aws/secret_access_keys.go +++ b/builtin/logical/aws/secret_access_keys.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -153,20 +150,15 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage, return logical.ErrorResponse("must specify at least one of policy_arns or policy_document with %s credential_type", federationTokenCred), nil } - tokenResp, err := stsClient.GetFederationTokenWithContext(ctx, getTokenInput) + tokenResp, err := stsClient.GetFederationToken(getTokenInput) if err != nil { return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err) } - // While STS credentials cannot be revoked/renewed, we will still create a lease since users are - // relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually. - // - ttl := tokenResp.Credentials.Expiration.Sub(time.Now()) resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ "access_key": *tokenResp.Credentials.AccessKeyId, "secret_key": *tokenResp.Credentials.SecretAccessKey, "security_token": *tokenResp.Credentials.SessionToken, - "ttl": uint64(ttl.Seconds()), }, map[string]interface{}{ "username": username, "policy": policy, @@ -174,7 +166,7 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage, }) // Set the secret TTL to appropriately match the expiration of the token - resp.Secret.TTL = ttl + resp.Secret.TTL = tokenResp.Credentials.Expiration.Sub(time.Now()) // STS are purposefully short-lived and aren't renewable resp.Secret.Renewable = false @@ -241,21 +233,16 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage, if len(policyARNs) > 0 { assumeRoleInput.SetPolicyArns(convertPolicyARNs(policyARNs)) } - tokenResp, err := stsClient.AssumeRoleWithContext(ctx, assumeRoleInput) + tokenResp, err := stsClient.AssumeRole(assumeRoleInput) if err != nil { return logical.ErrorResponse("Error assuming role: %s", err), awsutil.CheckAWSError(err) } - // While STS credentials cannot be revoked/renewed, we will still create a 
lease since users are - // relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually. - // - ttl := tokenResp.Credentials.Expiration.Sub(time.Now()) resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ "access_key": *tokenResp.Credentials.AccessKeyId, "secret_key": *tokenResp.Credentials.SecretAccessKey, "security_token": *tokenResp.Credentials.SessionToken, "arn": *tokenResp.AssumedRoleUser.Arn, - "ttl": uint64(ttl.Seconds()), }, map[string]interface{}{ "username": roleSessionName, "policy": roleArn, @@ -263,7 +250,7 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage, }) // Set the secret TTL to appropriately match the expiration of the token - resp.Secret.TTL = ttl + resp.Secret.TTL = tokenResp.Credentials.Expiration.Sub(time.Now()) // STS are purposefully short-lived and aren't renewable resp.Secret.Renewable = false @@ -340,7 +327,7 @@ func (b *backend) secretAccessKeysCreate( } // Create the user - _, err = iamClient.CreateUserWithContext(ctx, createUserRequest) + _, err = iamClient.CreateUser(createUserRequest) if err != nil { if walErr := framework.DeleteWAL(ctx, s, walID); walErr != nil { iamErr := fmt.Errorf("error creating IAM user: %w", err) @@ -351,7 +338,7 @@ func (b *backend) secretAccessKeysCreate( for _, arn := range role.PolicyArns { // Attach existing policy against user - _, err = iamClient.AttachUserPolicyWithContext(ctx, &iam.AttachUserPolicyInput{ + _, err = iamClient.AttachUserPolicy(&iam.AttachUserPolicyInput{ UserName: aws.String(username), PolicyArn: aws.String(arn), }) @@ -362,7 +349,7 @@ func (b *backend) secretAccessKeysCreate( } if role.PolicyDocument != "" { // Add new inline user policy against user - _, err = iamClient.PutUserPolicyWithContext(ctx, &iam.PutUserPolicyInput{ + _, err = iamClient.PutUserPolicy(&iam.PutUserPolicyInput{ UserName: aws.String(username), PolicyName: aws.String(policyName), PolicyDocument: aws.String(role.PolicyDocument), @@ -374,7 +361,7 
@@ func (b *backend) secretAccessKeysCreate( for _, group := range role.IAMGroups { // Add user to IAM groups - _, err = iamClient.AddUserToGroupWithContext(ctx, &iam.AddUserToGroupInput{ + _, err = iamClient.AddUserToGroup(&iam.AddUserToGroupInput{ UserName: aws.String(username), GroupName: aws.String(group), }) @@ -393,7 +380,7 @@ func (b *backend) secretAccessKeysCreate( } if len(tags) > 0 { - _, err = iamClient.TagUserWithContext(ctx, &iam.TagUserInput{ + _, err = iamClient.TagUser(&iam.TagUserInput{ Tags: tags, UserName: &username, }) @@ -404,7 +391,7 @@ func (b *backend) secretAccessKeysCreate( } // Create the keys - keyResp, err := iamClient.CreateAccessKeyWithContext(ctx, &iam.CreateAccessKeyInput{ + keyResp, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{ UserName: aws.String(username), }) if err != nil { diff --git a/builtin/logical/aws/secret_access_keys_test.go b/builtin/logical/aws/secret_access_keys_test.go index 890bb57b09122..7ee9d33b80275 100644 --- a/builtin/logical/aws/secret_access_keys_test.go +++ b/builtin/logical/aws/secret_access_keys_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -120,7 +117,7 @@ func TestGenUsername(t *testing.T) { func TestReadConfig_DefaultTemplate(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend(config) + b := Backend() if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -164,7 +161,7 @@ func TestReadConfig_DefaultTemplate(t *testing.T) { func TestReadConfig_CustomTemplate(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend(config) + b := Backend() if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } diff --git a/builtin/logical/aws/stepwise_test.go b/builtin/logical/aws/stepwise_test.go index b6f1ffea81d51..78feb1442595e 100644 --- a/builtin/logical/aws/stepwise_test.go +++ b/builtin/logical/aws/stepwise_test.go @@ -1,11 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package aws import ( "os" - "sync" "testing" stepwise "github.com/hashicorp/vault-testing-stepwise" @@ -14,13 +10,11 @@ import ( "github.com/mitchellh/mapstructure" ) -var stepwiseSetup sync.Once - func TestAccBackend_Stepwise_basic(t *testing.T) { t.Parallel() envOptions := &stepwise.MountOptions{ RegistryName: "aws-sec", - PluginType: api.PluginTypeSecrets, + PluginType: stepwise.PluginTypeSecrets, PluginName: "aws", MountPathPrefix: "aws-sec", } @@ -88,7 +82,7 @@ func testAccStepwiseRead(t *testing.T, path, name string, credentialTests []cred } func testAccStepwisePreCheck(t *testing.T) { - stepwiseSetup.Do(func() { + initSetup.Do(func() { if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" { t.Logf("[INFO] Test: Using us-west-2 as test region") os.Setenv("AWS_DEFAULT_REGION", "us-west-2") diff --git a/builtin/logical/cassandra/backend.go b/builtin/logical/cassandra/backend.go new file mode 100644 index 0000000000000..e7087448d15a2 --- /dev/null +++ 
b/builtin/logical/cassandra/backend.go @@ -0,0 +1,134 @@ +package cassandra + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/gocql/gocql" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// Factory creates a new backend +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +// Backend contains the base information for the backend's functionality +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + + Paths: []*framework.Path{ + pathConfigConnection(&b), + pathRoles(&b), + pathCredsCreate(&b), + }, + + Secrets: []*framework.Secret{ + secretCreds(&b), + }, + + Invalidate: b.invalidate, + + Clean: func(_ context.Context) { + b.ResetDB(nil) + }, + BackendType: logical.TypeLogical, + } + + return &b +} + +type backend struct { + *framework.Backend + + // Session is goroutine safe, however, since we reinitialize + // it when connection info changes, we want to make sure we + // can close it and use a new connection; hence the lock + session *gocql.Session + lock sync.Mutex +} + +type sessionConfig struct { + Hosts string `json:"hosts" structs:"hosts" mapstructure:"hosts"` + Username string `json:"username" structs:"username" mapstructure:"username"` + Password string `json:"password" structs:"password" mapstructure:"password"` + TLS bool `json:"tls" structs:"tls" mapstructure:"tls"` + InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"` + Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` + IssuingCA string `json:"issuing_ca" structs:"issuing_ca" 
mapstructure:"issuing_ca"` + ProtocolVersion int `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"` + ConnectTimeout int `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"` + TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"` +} + +// DB returns the database connection. +func (b *backend) DB(ctx context.Context, s logical.Storage) (*gocql.Session, error) { + b.lock.Lock() + defer b.lock.Unlock() + + // If we already have a DB, we got it! + if b.session != nil { + return b.session, nil + } + + entry, err := s.Get(ctx, "config/connection") + if err != nil { + return nil, err + } + if entry == nil { + return nil, fmt.Errorf("configure the DB connection with config/connection first") + } + + config := &sessionConfig{} + if err := entry.DecodeJSON(config); err != nil { + return nil, err + } + + session, err := createSession(config, s) + // Store the session in backend for reuse + b.session = session + + return session, err +} + +// ResetDB forces a connection next time DB() is called. +func (b *backend) ResetDB(newSession *gocql.Session) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.session != nil { + b.session.Close() + } + + b.session = newSession +} + +func (b *backend) invalidate(_ context.Context, key string) { + switch key { + case "config/connection": + b.ResetDB(nil) + } +} + +const backendHelp = ` +The Cassandra backend dynamically generates database users. + +After mounting this backend, configure it using the endpoints within +the "config/" path. 
+` diff --git a/builtin/logical/cassandra/backend_test.go b/builtin/logical/cassandra/backend_test.go new file mode 100644 index 0000000000000..1b76dfe6c2a42 --- /dev/null +++ b/builtin/logical/cassandra/backend_test.go @@ -0,0 +1,163 @@ +package cassandra + +import ( + "context" + "fmt" + "log" + "testing" + + "github.com/hashicorp/vault/helper/testhelpers/cassandra" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +func TestBackend_basic(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + copyFromTo := map[string]string{ + "test-fixtures/cassandra.yaml": "/etc/cassandra/cassandra.yaml", + } + host, cleanup := cassandra.PrepareTestContainer(t, + cassandra.CopyFromTo(copyFromTo), + ) + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, host.ConnectionURL()), + testAccStepRole(t), + testAccStepReadCreds(t, "test"), + }, + }) +} + +func TestBackend_roleCrud(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + copyFromTo := map[string]string{ + "test-fixtures/cassandra.yaml": "/etc/cassandra/cassandra.yaml", + } + host, cleanup := cassandra.PrepareTestContainer(t, + cassandra.CopyFromTo(copyFromTo)) + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, host.ConnectionURL()), + testAccStepRole(t), + testAccStepRoleWithOptions(t), + testAccStepReadRole(t, "test", testRole), + testAccStepReadRole(t, "test2", testRole), + testAccStepDeleteRole(t, "test"), + testAccStepDeleteRole(t, "test2"), + testAccStepReadRole(t, 
"test", ""), + testAccStepReadRole(t, "test2", ""), + }, + }) +} + +func testAccStepConfig(t *testing.T, hostname string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Data: map[string]interface{}{ + "hosts": hostname, + "username": "cassandra", + "password": "cassandra", + "protocol_version": 3, + }, + } +} + +func testAccStepRole(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/test", + Data: map[string]interface{}{ + "creation_cql": testRole, + }, + } +} + +func testAccStepRoleWithOptions(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/test2", + Data: map[string]interface{}{ + "creation_cql": testRole, + "lease": "30s", + "consistency": "All", + }, + } +} + +func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "roles/" + n, + } +} + +func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "creds/" + name, + Check: func(resp *logical.Response) error { + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + log.Printf("[WARN] Generated credentials: %v", d) + + return nil + }, + } +} + +func testAccStepReadRole(t *testing.T, name string, cql string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if cql == "" { + return nil + } + + return fmt.Errorf("response is nil") + } + + var d struct { + CreationCQL string `mapstructure:"creation_cql"` + } + if err := mapstructure.Decode(resp.Data, &d); err 
!= nil { + return err + } + + if d.CreationCQL != cql { + return fmt.Errorf("bad: %#v\n%#v\n%#v\n", resp, cql, d.CreationCQL) + } + + return nil + }, + } +} + +const testRole = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER; +GRANT ALL PERMISSIONS ON ALL KEYSPACES TO {{username}};` diff --git a/builtin/logical/cassandra/cmd/cassandra/main.go b/builtin/logical/cassandra/cmd/cassandra/main.go new file mode 100644 index 0000000000000..0ab900aa112aa --- /dev/null +++ b/builtin/logical/cassandra/cmd/cassandra/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/cassandra" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.Serve(&plugin.ServeOpts{ + BackendFactoryFunc: cassandra.Factory, + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/cassandra/path_config_connection.go b/builtin/logical/cassandra/path_config_connection.go new file mode 100644 index 0000000000000..afa1816880d81 --- /dev/null +++ b/builtin/logical/cassandra/path_config_connection.go @@ -0,0 +1,245 @@ +package cassandra + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigConnection(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/connection", + Fields: map[string]*framework.FieldSchema{ + "hosts": { + Type: framework.TypeString, + Description: 
"Comma-separated list of hosts", + }, + + "username": { + Type: framework.TypeString, + Description: "The username to use for connecting to the cluster", + }, + + "password": { + Type: framework.TypeString, + Description: "The password to use for connecting to the cluster", + }, + + "tls": { + Type: framework.TypeBool, + Description: `Whether to use TLS. If pem_bundle or pem_json are +set, this is automatically set to true`, + }, + + "insecure_tls": { + Type: framework.TypeBool, + Description: `Whether to use TLS but skip verification; has no +effect if a CA certificate is provided`, + }, + + // TLS 1.3 is not supported as this engine is deprecated. Please switch to the Cassandra database secrets engine + "tls_min_version": { + Type: framework.TypeString, + Default: "tls12", + Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'", + }, + + "pem_bundle": { + Type: framework.TypeString, + Description: `PEM-format, concatenated unencrypted secret key +and certificate, with optional CA certificate`, + }, + + "pem_json": { + Type: framework.TypeString, + Description: `JSON containing a PEM-format, unencrypted secret +key and certificate, with optional CA certificate. +The JSON output of a certificate issued with the PKI +backend can be directly passed into this parameter. +If both this and "pem_bundle" are specified, this will +take precedence.`, + }, + + "protocol_version": { + Type: framework.TypeInt, + Description: `The protocol version to use. Defaults to 2.`, + }, + + "connect_timeout": { + Type: framework.TypeDurationSecond, + Default: 5, + Description: `The connection timeout to use. 
Defaults to 5.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathConnectionRead, + logical.UpdateOperation: b.pathConnectionWrite, + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +func (b *backend) pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entry, err := req.Storage.Get(ctx, "config/connection") + if err != nil { + return nil, err + } + if entry == nil { + return logical.ErrorResponse(fmt.Sprintf("Configure the DB connection with config/connection first")), nil + } + + config := &sessionConfig{} + if err := entry.DecodeJSON(config); err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "hosts": config.Hosts, + "username": config.Username, + "tls": config.TLS, + "insecure_tls": config.InsecureTLS, + "certificate": config.Certificate, + "issuing_ca": config.IssuingCA, + "protocol_version": config.ProtocolVersion, + "connect_timeout": config.ConnectTimeout, + "tls_min_version": config.TLSMinVersion, + }, + } + return resp, nil +} + +func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + hosts := data.Get("hosts").(string) + username := data.Get("username").(string) + password := data.Get("password").(string) + + switch { + case len(hosts) == 0: + return logical.ErrorResponse("Hosts cannot be empty"), nil + case len(username) == 0: + return logical.ErrorResponse("Username cannot be empty"), nil + case len(password) == 0: + return logical.ErrorResponse("Password cannot be empty"), nil + } + + config := &sessionConfig{ + Hosts: hosts, + Username: username, + Password: password, + TLS: data.Get("tls").(bool), + InsecureTLS: data.Get("insecure_tls").(bool), + ProtocolVersion: data.Get("protocol_version").(int), + ConnectTimeout: 
data.Get("connect_timeout").(int), + } + + config.TLSMinVersion = data.Get("tls_min_version").(string) + if config.TLSMinVersion == "" { + return logical.ErrorResponse("failed to get 'tls_min_version' value"), nil + } + + var ok bool + _, ok = tlsutil.TLSLookup[config.TLSMinVersion] + if !ok { + return logical.ErrorResponse("invalid 'tls_min_version'"), nil + } + + if config.InsecureTLS { + config.TLS = true + } + + pemBundle := data.Get("pem_bundle").(string) + pemJSON := data.Get("pem_json").(string) + + var certBundle *certutil.CertBundle + var parsedCertBundle *certutil.ParsedCertBundle + var err error + + switch { + case len(pemJSON) != 0: + parsedCertBundle, err = certutil.ParsePKIJSON([]byte(pemJSON)) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %s", err)), nil + } + certBundle, err = parsedCertBundle.ToCertBundle() + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Error marshaling PEM information: %s", err)), nil + } + config.Certificate = certBundle.Certificate + config.PrivateKey = certBundle.PrivateKey + config.IssuingCA = certBundle.IssuingCA + config.TLS = true + + case len(pemBundle) != 0: + parsedCertBundle, err = certutil.ParsePEMBundle(pemBundle) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Error parsing the given PEM information: %s", err)), nil + } + certBundle, err = parsedCertBundle.ToCertBundle() + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Error marshaling PEM information: %s", err)), nil + } + config.Certificate = certBundle.Certificate + config.PrivateKey = certBundle.PrivateKey + config.IssuingCA = certBundle.IssuingCA + config.TLS = true + } + + session, err := createSession(config, req.Storage) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + // Store it + entry, err := logical.StorageEntryJSON("config/connection", config) + if err != 
nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + // Reset the DB connection + b.ResetDB(session) + + return nil, nil +} + +const pathConfigConnectionHelpSyn = ` +Configure the connection information to talk to Cassandra. +` + +const pathConfigConnectionHelpDesc = ` +This path configures the connection information used to connect to Cassandra. + +"hosts" is a comma-delimited list of hostnames in the Cassandra cluster. + +"username" and "password" are self-explanatory, although the given user +must have superuser access within Cassandra. Note that since this backend +issues username/password credentials, Cassandra must be configured to use +PasswordAuthenticator or a similar backend for its authentication. If you wish +to have no authorization in Cassandra and want to use TLS client certificates, +see the PKI backend. + +TLS works as follows: + +* If "tls" is set to true, the connection will use TLS; this happens automatically if "pem_bundle", "pem_json", or "insecure_tls" is set + +* If "insecure_tls" is set to true, the connection will not perform verification of the server certificate; this also sets "tls" to true + +* If only "issuing_ca" is set in "pem_json", or the only certificate in "pem_bundle" is a CA certificate, the given CA certificate will be used for server certificate verification; otherwise the system CA certificates will be used + +* If "certificate" and "private_key" are set in "pem_bundle" or "pem_json", client auth will be turned on for the connection + +"pem_bundle" should be a PEM-concatenated bundle of a private key + client certificate, an issuing CA certificate, or both. "pem_json" should contain the same information; for convenience, the JSON format is the same as that output by the issue command from the PKI backend. + +When configuring the connection information, the backend will verify its +validity. 
+` diff --git a/builtin/logical/cassandra/path_creds_create.go b/builtin/logical/cassandra/path_creds_create.go new file mode 100644 index 0000000000000..ec100b961317b --- /dev/null +++ b/builtin/logical/cassandra/path_creds_create.go @@ -0,0 +1,123 @@ +package cassandra + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/gocql/gocql" + "github.com/hashicorp/go-secure-stdlib/strutil" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathCredsCreate(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathCredsCreateRead, + }, + + HelpSynopsis: pathCredsCreateReadHelpSyn, + HelpDescription: pathCredsCreateReadHelpDesc, + } +} + +func (b *backend) pathCredsCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Get the role + role, err := getRole(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", name)), nil + } + + displayName := req.DisplayName + userUUID, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + username := fmt.Sprintf("vault_%s_%s_%s_%d", name, displayName, userUUID, time.Now().Unix()) + username = strings.ReplaceAll(username, "-", "_") + password, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + // Get our connection + session, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Set consistency + if role.Consistency != "" { + consistencyValue, err := gocql.ParseConsistencyWrapper(role.Consistency) + if err != nil { + 
return nil, err + } + + session.SetConsistency(consistencyValue) + } + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(role.CreationCQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + err = session.Query(substQuery(query, map[string]string{ + "username": username, + "password": password, + })).Exec() + if err != nil { + for _, query := range strutil.ParseArbitraryStringSlice(role.RollbackCQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + session.Query(substQuery(query, map[string]string{ + "username": username, + "password": password, + })).Exec() + } + return nil, err + } + } + + // Return the secret + resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ + "username": username, + "password": password, + }, map[string]interface{}{ + "username": username, + "role": name, + }) + resp.Secret.TTL = role.Lease + + return resp, nil +} + +const pathCredsCreateReadHelpSyn = ` +Request database credentials for a certain role. +` + +const pathCredsCreateReadHelpDesc = ` +This path creates database credentials for a certain role. The +database credentials will be generated on demand and will be automatically +revoked when the lease is up. 
+` diff --git a/builtin/logical/cassandra/path_roles.go b/builtin/logical/cassandra/path_roles.go new file mode 100644 index 0000000000000..df7671e47e7cc --- /dev/null +++ b/builtin/logical/cassandra/path_roles.go @@ -0,0 +1,196 @@ +package cassandra + +import ( + "context" + "fmt" + "time" + + "github.com/fatih/structs" + "github.com/gocql/gocql" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + defaultCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;` + defaultRollbackCQL = `DROP USER '{{username}}';` +) + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role", + }, + + "creation_cql": { + Type: framework.TypeString, + Default: defaultCreationCQL, + Description: `CQL to create a user and optionally grant +authorization. If not supplied, a default that +creates non-superuser accounts with the built-in +password authenticator will be used; no +authorization grants will be configured. Separate +statements by semicolons; use @file to load from a +file. Valid template values are '{{username}}' and +'{{password}}' -- the single quotes are important!`, + }, + + "rollback_cql": { + Type: framework.TypeString, + Default: defaultRollbackCQL, + Description: `CQL to roll back an account operation. This will +be used if there is an error during execution of a +statement passed in via the "creation_cql" parameter +parameter. The default simply drops the user, which +should generally be sufficient. Separate statements +by semicolons; use @file to load from a file. 
Valid +template values are '{{username}}' and +'{{password}}' -- the single quotes are important!`, + }, + + "lease": { + Type: framework.TypeString, + Default: "4h", + Description: "The lease length; defaults to 4 hours", + }, + + "consistency": { + Type: framework.TypeString, + Default: "Quorum", + Description: "The consistency level for the operations; defaults to Quorum.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleRead, + logical.UpdateOperation: b.pathRoleCreate, + logical.DeleteOperation: b.pathRoleDelete, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func getRole(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { + entry, err := s.Get(ctx, "role/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + role, err := getRole(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: structs.New(role).Map(), + }, nil +} + +func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + creationCQL := data.Get("creation_cql").(string) + + rollbackCQL := data.Get("rollback_cql").(string) + + leaseRaw := data.Get("lease").(string) + lease, err := time.ParseDuration(leaseRaw) + if err 
!= nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error parsing lease value of %s: %s", leaseRaw, err)), nil + } + + consistencyStr := data.Get("consistency").(string) + _, err = gocql.ParseConsistencyWrapper(consistencyStr) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error parsing consistency value of %q: %v", consistencyStr, err)), nil + } + + entry := &roleEntry{ + Lease: lease, + CreationCQL: creationCQL, + RollbackCQL: rollbackCQL, + Consistency: consistencyStr, + } + + // Store it + entryJSON, err := logical.StorageEntryJSON("role/"+name, entry) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entryJSON); err != nil { + return nil, err + } + + return nil, nil +} + +type roleEntry struct { + CreationCQL string `json:"creation_cql" structs:"creation_cql"` + Lease time.Duration `json:"lease" structs:"lease"` + RollbackCQL string `json:"rollback_cql" structs:"rollback_cql"` + Consistency string `json:"consistency" structs:"consistency"` +} + +const pathRoleHelpSyn = ` +Manage the roles that can be created with this backend. +` + +const pathRoleHelpDesc = ` +This path lets you manage the roles that can be created with this backend. + +The "creation_cql" parameter customizes the CQL string used to create users +and assign them grants. This can be a sequence of CQL queries separated by +semicolons. Some substitution will be done to the CQL string for certain keys. +The names of the variables must be surrounded by '{{' and '}}' to be replaced. +Note that it is important that single quotes are used, not double quotes. + + * "username" - The random username generated for the DB user. + + * "password" - The random password generated for the DB user. + +If no "creation_cql" parameter is given, a default will be used: + +` + defaultCreationCQL + ` + +This default should be suitable for Cassandra installations using the password +authenticator but not configured to use authorization. 
+ +Similarly, the "rollback_cql" is used if user creation fails, in the absence of +Cassandra transactions. The default should be suitable for almost any +instance of Cassandra: + +` + defaultRollbackCQL + ` + +"lease" the lease time; if not set the mount/system defaults are used. +` diff --git a/builtin/logical/cassandra/secret_creds.go b/builtin/logical/cassandra/secret_creds.go new file mode 100644 index 0000000000000..3ca06c9279038 --- /dev/null +++ b/builtin/logical/cassandra/secret_creds.go @@ -0,0 +1,77 @@ +package cassandra + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// SecretCredsType is the type of creds issued from this backend +const SecretCredsType = "cassandra" + +func secretCreds(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretCredsType, + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "Username", + }, + + "password": { + Type: framework.TypeString, + Description: "Password", + }, + }, + + Renew: b.secretCredsRenew, + Revoke: b.secretCredsRevoke, + } +} + +func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the lease information + roleRaw, ok := req.Secret.InternalData["role"] + if !ok { + return nil, fmt.Errorf("secret is missing role internal data") + } + roleName, ok := roleRaw.(string) + if !ok { + return nil, fmt.Errorf("error converting role internal data to string") + } + + role, err := getRole(ctx, req.Storage, roleName) + if err != nil { + return nil, fmt.Errorf("unable to load role: %w", err) + } + + resp := &logical.Response{Secret: req.Secret} + resp.Secret.TTL = role.Lease + return resp, nil +} + +func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the username from the internal data + usernameRaw, 
ok := req.Secret.InternalData["username"] + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("error converting username internal data to string") + } + + session, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, fmt.Errorf("error getting session") + } + + err = session.Query(fmt.Sprintf("DROP USER '%s'", username)).Exec() + if err != nil { + return nil, fmt.Errorf("error removing user %q", username) + } + + return nil, nil +} diff --git a/builtin/logical/cassandra/test-fixtures/cassandra.yaml b/builtin/logical/cassandra/test-fixtures/cassandra.yaml new file mode 100644 index 0000000000000..fe1ec8b07bbd0 --- /dev/null +++ b/builtin/logical/cassandra/test-fixtures/cassandra.yaml @@ -0,0 +1,1146 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting on the node's initial start, +# on subsequent starts, this setting will apply even if initial token is set. 
+# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# Triggers automatic allocation of num_tokens tokens for this node. The allocation +# algorithm attempts to choose tokens in a way that optimizes replicated load over +# the nodes in the datacenter for the replication strategy used by the specified +# keyspace. +# +# The load assigned to each node will be close to proportional to its number of +# vnodes. +# +# Only supported with the Murmur3Partitioner. +# allocate_tokens_for_keyspace: KEYSPACE + +# initial_token allows you to specify tokens manually. While you can use it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +# May either be "true" or "false" to enable globally +hinted_handoff_enabled: true + +# When hinted_handoff_enabled is true, a black list of data centers that will not +# perform hinted handoff +# hinted_handoff_disabled_datacenters: +# - DC1 +# - DC2 + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours + +# Maximum throttle in KBs per second, per delivery thread. This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) 
+hinted_handoff_throttle_in_kb: 1024 + +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# Directory where Cassandra should store hints. +# If not set, the default directory is $CASSANDRA_HOME/data/hints. +# hints_directory: /var/lib/cassandra/hints + +# How often hints should be flushed from the internal buffers to disk. +# Will *not* trigger fsync. +hints_flush_period_in_ms: 10000 + +# Maximum size for a single hints file, in megabytes. +max_hints_file_size_in_mb: 128 + +# Compression to apply to the hint files. If omitted, hints files +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +#hints_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# Maximum throttle in KBs per second, total. This will be +# reduced proportionally to the number of nodes in the cluster. +batchlog_replay_throttle_in_kb: 1024 + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) +authenticator: PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. 
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: CassandraAuthorizer + +# Part of the Authentication & Authorization backend, implementing IRoleManager; used +# to maintain grants and memberships between roles. +# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, +# which stores role information in the system_auth keyspace. Most functions of the +# IRoleManager require an authenticated login, so unless the configured IAuthenticator +# actually implements authentication, most of this functionality will be unavailable. +# +# - CassandraRoleManager stores role data in the system_auth keyspace. Please +# increase system_auth keyspace replication factor if you use this role manager. +role_manager: CassandraRoleManager + +# Validity period for roles cache (fetching granted roles can be an expensive +# operation depending on the role manager, CassandraRoleManager is one example) +# Granted roles are cached for authenticated sessions in AuthenticatedUser and +# after the period specified here, become eligible for (async) reload. +# Defaults to 2000, set to 0 to disable caching entirely. +# Will be disabled automatically for AllowAllAuthenticator. +roles_validity_in_ms: 2000 + +# Refresh interval for roles cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If roles_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as roles_validity_in_ms. +# roles_update_interval_in_ms: 2000 + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. 
+permissions_validity_in_ms: 2000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# Validity period for credentials cache. This cache is tightly coupled to +# the provided PasswordAuthenticator implementation of IAuthenticator. If +# another IAuthenticator implementation is configured, this cache will not +# be automatically used and so the following settings will have no effect. +# Please note, credentials are cached in their encrypted form, so while +# activating this cache may reduce the number of queries made to the +# underlying table, it may not bring a significant reduction in the +# latency of individual authentication attempts. +# Defaults to 2000, set to 0 to disable credentials caching. +credentials_validity_in_ms: 2000 + +# Refresh interval for credentials cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If credentials_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as credentials_validity_in_ms. +# credentials_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Besides Murmur3Partitioner, partitioners included for backwards +# compatibility include RandomPartitioner, ByteOrderedPartitioner, and +# OrderPreservingPartitioner. 
+# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +# If not set, the default directory is $CASSANDRA_HOME/data/data. +data_file_directories: + - /var/lib/cassandra/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# If not set, the default directory is $CASSANDRA_HOME/data/commitlog. +commitlog_directory: /var/lib/cassandra/commitlog + +# Enable / disable CDC functionality on a per-node basis. This modifies the logic used +# for write path allocation rejection (standard: never reject. cdc: reject Mutation +# containing a CDC-enabled table if at space limit in cdc_raw_directory). +cdc_enabled: false + +# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the +# segment contains mutations for a CDC-enabled table. This should be placed on a +# separate spindle than the data directories. If not set, the default directory is +# $CASSANDRA_HOME/data/cdc_raw. +# cdc_raw_directory: /var/lib/cassandra/cdc_raw + +# Policy for data disk failures: +# +# die +# shut down gossip and client transports and kill the JVM for any fs errors or +# single-sstable errors, so the node can be replaced. +# +# stop_paranoid +# shut down gossip and client transports even for single-sstable errors, +# kill the JVM for errors during startup. +# +# stop +# shut down gossip and client transports, leaving the node effectively dead, but +# can still be inspected via JMX, kill the JVM for errors during startup. +# +# best_effort +# stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! 
+# +# ignore +# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Policy for commit disk failures: +# +# die +# shut down gossip and Thrift and kill the JVM, so the node can be replaced. +# +# stop +# shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# +# stop_commit +# shutdown the commit log, letting writes collect but +# continuing to service reads, as in pre-2.0.5 Cassandra +# +# ignore +# ignore fatal errors and let the batches fail +commit_failure_policy: stop + +# Maximum size of the native protocol prepared statement cache +# +# Valid values are either "auto" (omitting the value) or a value greater 0. +# +# Note that specifying a too large value will result in long running GCs and possibly +# out-of-memory errors. Keep the value at a small fraction of the heap. +# +# If you constantly see "prepared statements discarded in the last minute because +# cache limit reached" messages, the first step is to investigate the root cause +# of these messages and check whether prepared statements are used correctly - +# i.e. use bind markers for variable parts. +# +# Do only change the default value, if you really have more prepared statements than +# fit in the cache. In most cases it is not necessary to change this value. +# Constantly re-preparing statements is a performance penalty. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +prepared_statements_cache_size_mb: + +# Maximum size of the Thrift prepared statement cache +# +# If you do not use Thrift at all, it is safe to leave this value at "auto". +# +# See description of 'prepared_statements_cache_size_mb' above for more information. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +thrift_prepared_statements_cache_size_mb: + +# Maximum size of the key cache in memory. 
+# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Row cache implementation class name. Available implementations: +# +# org.apache.cassandra.cache.OHCProvider +# Fully off-heap row cache implementation (default). +# +# org.apache.cassandra.cache.SerializingCacheProvider +# This is the row cache implementation availabile +# in previous releases of Cassandra. +# row_cache_class_name: org.apache.cassandra.cache.OHCProvider + +# Maximum size of the row cache in memory. +# Please note that OHC cache implementation requires some additional off-heap memory to manage +# the map structures and some in-flight memory during operations before/after cache entries can be +# accounted against the cache capacity. This overhead is usually small compared to the whole capacity. 
+# Do not specify more memory than the system can afford in the worst usual situation and leave some
+# headroom for OS block level cache. Never allow your system to swap.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should save the row cache.
+# Caches are saved to saved_caches_directory as specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save.
+# Specify 0 (which is the default), meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# Maximum size of the counter cache in memory.
+#
+# Counter cache helps to reduce counter locks' contention for hot counter cells.
+# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
+# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
+# of the lock hold, helping with hot counter cell updates, but will not allow skipping
+# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
+# in memory, not the whole counter, so it's relatively cheap.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
+# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
+counter_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the counter cache (keys only). Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Default is 7200 or 2 hours. 
+counter_cache_save_period: 7200 + +# Number of keys from the counter cache to save +# Disabled by default, meaning all keys are going to be saved +# counter_cache_keys_to_save: 100 + +# saved caches +# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. +saved_caches_directory: /var/lib/cassandra/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +# Max mutation size is also configurable via max_mutation_size_in_kb setting in +# cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. +# +# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must +# be set to at least twice the size of max_mutation_size_in_kb / 1024 +# +commitlog_segment_size_in_mb: 32 + +# Compression to apply to the commit log. If omitted, the commit log +# will be written uncompressed. 
LZ4, Snappy, and Deflate compressors +# are supported. +# commitlog_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: ",," + - seeds: "127.0.0.1" + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. Same applies to +# "concurrent_counter_writes", since counter writes read the current +# values before incrementing and writing them back. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 +concurrent_counter_writes: 32 + +# For materialized view writes, as there is a read involved, so this should +# be limited by the less of concurrent reads or concurrent writes. +concurrent_materialized_view_writes: 32 + +# Maximum memory to use for sstable chunk cache and buffer pooling. +# 32MB of this are reserved for pooling buffers, the rest is used as an +# cache that holds uncompressed sstable chunks. +# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, +# so is in addition to the memory allocated for heap. The cache also has on-heap +# overhead which is roughly 128 bytes per chunk (i.e. 
0.2% of the reserved size +# if the default 64k chunk size is used). +# Memory is only allocated when needed. +# file_cache_size_in_mb: 512 + +# Flag indicating whether to allocate on or off heap when the sstable buffer +# pool is exhausted, that is when it has exceeded the maximum memory +# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. + +# buffer_pool_use_heap_if_exhausted: true + +# The strategy for optimizing disk read +# Possible values are: +# ssd (for solid state disks, the default) +# spinning (for spinning disks) +# disk_optimization_strategy: ssd + +# Total permitted memory to use for memtables. Cassandra will stop +# accepting writes when the limit is exceeded until a flush completes, +# and will trigger a flush based on memtable_cleanup_threshold +# If omitted, Cassandra will set both to 1/4 the size of the heap. +# memtable_heap_space_in_mb: 2048 +# memtable_offheap_space_in_mb: 2048 + +# Ratio of occupied non-flushing memtable size to total permitted size +# that will trigger a flush of the largest memtable. Larger mct will +# mean larger flushes and hence less compaction, but also less concurrent +# flush activity which can make it difficult to keep your disks fed +# under heavy write load. +# +# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) +# memtable_cleanup_threshold: 0.11 + +# Specify the way Cassandra allocates and manages memtable memory. +# Options are: +# +# heap_buffers +# on heap nio buffers +# +# offheap_buffers +# off heap (direct) nio buffers +# +# offheap_objects +# off heap objects +memtable_allocation_type: heap_buffers + +# Total space to use for commit logs on disk. +# +# If space gets above this value, Cassandra will flush every dirty CF +# in the oldest segment and remove it. So a small total commitlog space +# will tend to cause more flush activity on less-active columnfamilies. 
+# +# The default value is the smaller of 8192, and 1/4 of the total space +# of the commitlog volume. +# +# commitlog_total_space_in_mb: 8192 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. +# +# memtable_flush_writers defaults to one per data_file_directory. +# +# If your data directories are backed by SSD, you can increase this, but +# avoid having memtable_flush_writers * data_file_directories > number of cores +#memtable_flush_writers: 1 + +# Total space to use for change-data-capture logs on disk. +# +# If space gets above this value, Cassandra will throw WriteTimeoutException +# on Mutations including tables with CDC enabled. A CDCCompactor is responsible +# for parsing the raw CDC logs and deleting them when parsing is completed. +# +# The default value is the min of 4096 mb and 1/8th of the total space +# of the drive where cdc_raw_directory resides. +# cdc_total_space_in_mb: 4096 + +# When we hit our cdc_raw limit and the CDCCompactor is either running behind +# or experiencing backpressure, we check at the following interval to see if any +# new space for cdc-tracked tables has been made available. Default to 250ms +# cdc_free_space_check_interval_ms: 250 + +# A fixed memory pool size in MB for SSTable index summaries. If left +# empty, this will default to 5% of the heap size. If the memory usage of +# all index summaries exceeds this limit, SSTables with low read rates will +# shrink their index summaries in order to meet this limit. However, this +# is a best-effort process. In extreme conditions Cassandra may need to use +# more than this amount of memory. +index_summary_capacity_in_mb: + +# How frequently index summaries should be resampled. This is done +# periodically to redistribute memory from the fixed-size pool to sstables +# proportional their recent read rates. 
Setting to -1 will disable this +# process, leaving existing index summaries at their current sampling level. +index_summary_resize_interval_in_minutes: 60 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSDs; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +ssl_storage_port: 7001 + +# Address or interface to bind to and tell other Cassandra nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# Set listen_address OR listen_interface, not both. +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing _if_ the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting listen_address to 0.0.0.0 is always wrong. +# +listen_address: 172.17.0.2 + +# Set listen_address OR listen_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# listen_interface: eth0 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. 
If there is only one address it will be selected regardless of ipv4/ipv6. +# listen_interface_prefer_ipv6: false + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +broadcast_address: 127.0.0.1 + +# When using multiple physical network interfaces, set this +# to true to listen on broadcast_address in addition to +# the listen_address, allowing nodes to communicate in both +# interfaces. +# Ignore this property if the network configuration automatically +# routes between the public and private networks such as EC2. +# listen_on_broadcast_address: false + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +native_transport_port: 9042 +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +# native_transport_port_ssl: 9142 +# The maximum threads for handling requests when the native transport is used. 
+# This is similar to rpc_max_threads though the default differs slightly (and +# there is no native_transport_min_threads, idle threads will always be stopped +# after 30 seconds). +# native_transport_max_threads: 128 +# +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. If you're changing this parameter, +# you may want to adjust max_value_size_in_mb accordingly. +# native_transport_max_frame_size_in_mb: 256 + +# The maximum number of concurrent client connections. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections: -1 + +# The maximum number of concurrent client connections per source ip. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections_per_ip: -1 + +# Whether to start the thrift rpc server. +start_rpc: false + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +rpc_address: 0.0.0.0 + +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# rpc_interface: eth1 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. 
+# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# RPC address to broadcast to drivers and other Cassandra nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +broadcast_rpc_address: 127.0.0.1 + +# enable or disable keepalive on rpc/native connections +rpc_keepalive: true + +# Cassandra provides two out-of-the-box options for the RPC Server: +# +# sync +# One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha +# Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). If hsha is selected then it is essential +# that rpc_max_threads is changed from the default value of unlimited. +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). 
+# +# The default is unlimited and thus provides no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See also: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and 'man tcp' +# internode_send_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum message length). +thrift_framed_transport_size_in_mb: 15 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. 
If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#
+# - a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# - but, Cassandra will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+column_index_size_in_kb: 64
+
+# Per sstable indexed key cache entries (the collation index in memory
+# mentioned above) exceeding this size will not be held on heap.
+# This means that only partition information is held on heap and the
+# index entries are read from disk.
+#
+# Note that this size refers to the size of the
+# serialized index information and not the size of the partition.
+column_index_cache_size_in_kb: 2
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+
+# Throttles compaction to the given total throughput across the entire
+# system. 
The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# When compacting, the replacement sstable(s) can be opened before they
+# are completely written, and used in place of the prior sstables for
+# any range that has been written. This helps to smoothly transfer reads
+# between the sstables, reducing page cache churn and keeping hot rows hot
+sstable_preemptive_open_interval_in_mb: 50
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s. 
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# Throttles all streaming file transfer between the datacenters,
+# this setting allows users to throttle inter dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# When unset, the default is 200 Mbps or 25 MB/s
+# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# How long the coordinator should wait for counter writes to complete
+counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Set socket timeout for streaming operation. 
+# The stream session is failed if no data/ack is received by any of the participants +# within that period, which means this should also be sufficient to stream a large +# sstable or rebuild table indexes. +# Default value is 86400000ms, which means stale streams timeout after 24 hours. +# A value of zero means stream sockets should never time out. +# streaming_socket_timeout_in_ms: 86400000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH +# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. +# This means that if you start with the default SimpleSnitch, which +# locates every node on "rack1" in "datacenter1", your only options +# if you need to add another datacenter are GossipingPropertyFileSnitch +# (and the older PFS). From there, if you want to migrate to an +# incompatible snitch like Ec2Snitch you can do it by adding new nodes +# under Ec2Snitch (which will locate them in a new "datacenter") and +# decommissioning the old ones. +# +# Out of the box, Cassandra provides: +# +# SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# +# GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. 
The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# +# PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# +# Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# +# Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. 
+endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# +# NoScheduler +# Has no options +# +# RoundRobin +# throttle_limit +# The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. 
+# default_weight +# default_weight is optional and allows for +# overriding the default which is 1. +# weights +# Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# Enable or disable inter-node encryption +# JVM defaults for supported SSL socket protocols and cipher suites can +# be replaced using custom encryption options. This is not recommended +# unless you have policies in place that dictate certain settings, or +# need to disable vulnerable ciphers or protocols in case the JVM cannot +# be updated. +# FIPS compliant settings can be configured at JVM level and should not +# involve changing encryption settings here: +# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html +# *NOTE* No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. 
For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + # require_endpoint_verification: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + # If enabled and optional is set to true encrypted and unencrypted connections are handled. + optional: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set truststore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# Can be: +# +# all +# all traffic is compressed +# +# dc +# traffic between different datacenters is compressed +# +# none +# nothing is compressed. +internode_compression: dc + +# Enable or disable tcp_nodelay for inter-dc communication. 
+# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: false + +# TTL for different trace types used during logging of the repair process. +tracetype_query_ttl: 86400 +tracetype_repair_ttl: 604800 + +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level +# This threshold can be adjusted to minimize logging if necessary +# gc_log_threshold_in_ms: 200 + +# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at +# INFO level +# UDFs (user defined functions) are disabled by default. +# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. +enable_user_defined_functions: false + +# Enables scripted UDFs (JavaScript UDFs). +# Java UDFs are always enabled, if enable_user_defined_functions is true. +# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. +# This option has no effect, if enable_user_defined_functions is false. +enable_scripted_user_defined_functions: false + +# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. +# Lowering this value on Windows can provide much tighter latency and better throughput, however +# some virtualized environments may see a negative performance impact from changing this setting +# below their system default. The sysinternals 'clockres' tool can confirm your system's default +# setting. +windows_timer_interval: 1 + + +# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from +# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by +# the "key_alias" is the only key that will be used for encrypt operations; previously used keys +# can still (and should!) 
be in the keystore and will be used on decrypt operations +# (to handle the case of key rotation). +# +# It is strongly recommended to download and install Java Cryptography Extension (JCE) +# Unlimited Strength Jurisdiction Policy Files for your version of the JDK. +# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) +# +# Currently, only the following file types are supported for transparent data encryption, although +# more are coming in future cassandra releases: commitlog, hints +transparent_data_encryption_options: + enabled: false + chunk_length_kb: 64 + cipher: AES/CBC/PKCS5Padding + key_alias: testing:1 + # CBC IV length for AES needs to be 16 bytes (which is also the default size) + # iv_length: 16 + key_provider: + - class_name: org.apache.cassandra.security.JKSKeyProvider + parameters: + - keystore: conf/.keystore + keystore_password: cassandra + store_type: JCEKS + key_password: cassandra + + +##################### +# SAFETY THRESHOLDS # +##################### + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +tombstone_warn_threshold: 1000 +tombstone_failure_threshold: 100000 + +# Log WARN on any batch size exceeding this value. 5kb per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. 
+batch_size_warn_threshold_in_kb: 5 + +# Fail any batch exceeding this value. 50kb (10x warn threshold) by default. +batch_size_fail_threshold_in_kb: 50 + +# Log WARN on any batches not of type LOGGED than span across more partitions than this limit +unlogged_batch_across_partitions_warn_threshold: 10 + +# Log a warning when compacting partitions larger than this value +compaction_large_partition_warning_threshold_mb: 100 + +# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level +# Adjust the threshold based on your application throughput requirement +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level +gc_warn_threshold_in_ms: 1000 + +# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption +# early. Any value size larger than this threshold will result into marking an SSTable +# as corrupted. +# max_value_size_in_mb: 256 diff --git a/builtin/logical/cassandra/util.go b/builtin/logical/cassandra/util.go new file mode 100644 index 0000000000000..8257aafd787fb --- /dev/null +++ b/builtin/logical/cassandra/util.go @@ -0,0 +1,95 @@ +package cassandra + +import ( + "crypto/tls" + "fmt" + "strings" + "time" + + "github.com/gocql/gocql" + "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// Query templates a query for us. +func substQuery(tpl string, data map[string]string) string { + for k, v := range data { + tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + } + + return tpl +} + +func createSession(cfg *sessionConfig, s logical.Storage) (*gocql.Session, error) { + clusterConfig := gocql.NewCluster(strings.Split(cfg.Hosts, ",")...) 
+ clusterConfig.Authenticator = gocql.PasswordAuthenticator{ + Username: cfg.Username, + Password: cfg.Password, + } + + clusterConfig.ProtoVersion = cfg.ProtocolVersion + if clusterConfig.ProtoVersion == 0 { + clusterConfig.ProtoVersion = 2 + } + + clusterConfig.Timeout = time.Duration(cfg.ConnectTimeout) * time.Second + + if cfg.TLS { + var tlsConfig *tls.Config + if len(cfg.Certificate) > 0 || len(cfg.IssuingCA) > 0 { + if len(cfg.Certificate) > 0 && len(cfg.PrivateKey) == 0 { + return nil, fmt.Errorf("found certificate for TLS authentication but no private key") + } + + certBundle := &certutil.CertBundle{} + if len(cfg.Certificate) > 0 { + certBundle.Certificate = cfg.Certificate + certBundle.PrivateKey = cfg.PrivateKey + } + if len(cfg.IssuingCA) > 0 { + certBundle.IssuingCA = cfg.IssuingCA + } + + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return nil, fmt.Errorf("failed to parse certificate bundle: %w", err) + } + + tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient) + if err != nil || tlsConfig == nil { + return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig: %#v; %w", tlsConfig, err) + } + tlsConfig.InsecureSkipVerify = cfg.InsecureTLS + + if cfg.TLSMinVersion != "" { + var ok bool + tlsConfig.MinVersion, ok = tlsutil.TLSLookup[cfg.TLSMinVersion] + if !ok { + return nil, fmt.Errorf("invalid 'tls_min_version' in config") + } + } else { + // MinVersion was not being set earlier. Reset it to + // zero to gracefully handle upgrades. 
+ tlsConfig.MinVersion = 0 + } + } + + clusterConfig.SslOpts = &gocql.SslOptions{ + Config: tlsConfig, + } + } + + session, err := clusterConfig.CreateSession() + if err != nil { + return nil, fmt.Errorf("error creating session: %w", err) + } + + // Verify the info + err = session.Query(`LIST USERS`).Exec() + if err != nil { + return nil, fmt.Errorf("error validating connection info: %w", err) + } + + return session, nil +} diff --git a/builtin/logical/consul/backend.go b/builtin/logical/consul/backend.go index 5e42a5197ed09..7fce10e262944 100644 --- a/builtin/logical/consul/backend.go +++ b/builtin/logical/consul/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( @@ -10,8 +7,6 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const operationPrefixConsul = "consul" - // ReportedVersion is used to report a specific version to Vault. var ReportedVersion = "" diff --git a/builtin/logical/consul/backend_test.go b/builtin/logical/consul/backend_test.go index 94ce864d965f7..fa7cf647135a2 100644 --- a/builtin/logical/consul/backend_test.go +++ b/builtin/logical/consul/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( diff --git a/builtin/logical/consul/client.go b/builtin/logical/consul/client.go index 1e30c660271c4..fd54830a4b777 100644 --- a/builtin/logical/consul/client.go +++ b/builtin/logical/consul/client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( diff --git a/builtin/logical/consul/cmd/consul/main.go b/builtin/logical/consul/cmd/consul/main.go index f42a535b95876..3b884ddf85ef4 100644 --- a/builtin/logical/consul/cmd/consul/main.go +++ b/builtin/logical/consul/cmd/consul/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: consul.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/consul/path_config.go b/builtin/logical/consul/path_config.go index 2f925d5b24e91..1fd60e30ec578 100644 --- a/builtin/logical/consul/path_config.go +++ b/builtin/logical/consul/path_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( @@ -15,11 +12,6 @@ import ( func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixConsul, - }, - Fields: map[string]*framework.FieldSchema{ "address": { Type: framework.TypeString, @@ -60,20 +52,9 @@ must be x509 PEM encoded and if this is set you need to also set client_cert.`, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigAccessRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "access-configuration", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigAccessWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "access", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathConfigAccessRead, + logical.UpdateOperation: b.pathConfigAccessWrite, }, } } @@ -135,7 +116,7 @@ func 
(b *backend) pathConfigAccessWrite(ctx context.Context, req *logical.Reques } token, _, err := client.ACL().Bootstrap() if err != nil { - return logical.ErrorResponse("Token not provided and failed to bootstrap ACLs: %s", err), nil + return logical.ErrorResponse("Token not provided and failed to bootstrap ACLs"), err } config.Token = token.SecretID } diff --git a/builtin/logical/consul/path_roles.go b/builtin/logical/consul/path_roles.go index 3e8c059f53d5a..fa513b5017d28 100644 --- a/builtin/logical/consul/path_roles.go +++ b/builtin/logical/consul/path_roles.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( @@ -17,11 +14,6 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixConsul, - OperationSuffix: "roles", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -31,12 +23,6 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixConsul, - OperationSuffix: "role", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/consul/path_token.go b/builtin/logical/consul/path_token.go index 9c8dbd1d4457e..7568774f39c6b 100644 --- a/builtin/logical/consul/path_token.go +++ b/builtin/logical/consul/path_token.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package consul import ( @@ -21,13 +18,6 @@ const ( func pathToken(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("role"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixConsul, - OperationVerb: "generate", - OperationSuffix: "credentials", - }, - Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, diff --git a/builtin/logical/consul/path_token_test.go b/builtin/logical/consul/path_token_test.go index 77e7f29ab128a..98e2b826fbcae 100644 --- a/builtin/logical/consul/path_token_test.go +++ b/builtin/logical/consul/path_token_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( diff --git a/builtin/logical/consul/secret_token.go b/builtin/logical/consul/secret_token.go index f2f206b7026a7..6dbccca014cef 100644 --- a/builtin/logical/consul/secret_token.go +++ b/builtin/logical/consul/secret_token.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go index 94091e201947e..e2e362fd5fa31 100644 --- a/builtin/logical/database/backend.go +++ b/builtin/logical/database/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package database import ( @@ -27,11 +24,10 @@ import ( ) const ( - operationPrefixDatabase = "database" - databaseConfigPath = "config/" - databaseRolePath = "role/" - databaseStaticRolePath = "static-role/" - minRootCredRollbackAge = 1 * time.Minute + databaseConfigPath = "config/" + databaseRolePath = "role/" + databaseStaticRolePath = "static-role/" + minRootCredRollbackAge = 1 * time.Minute ) type dbPluginInstance struct { diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go index 574bcd01af629..27ce027c959e2 100644 --- a/builtin/logical/database/backend_test.go +++ b/builtin/logical/database/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/credentials.go b/builtin/logical/database/credentials.go index c43c2648f03de..a6e54678d0589 100644 --- a/builtin/logical/database/credentials.go +++ b/builtin/logical/database/credentials.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( @@ -8,17 +5,12 @@ import ( "crypto/rand" "crypto/rsa" "crypto/x509" - "crypto/x509/pkix" "encoding/pem" "fmt" "io" "strings" - "time" "github.com/hashicorp/vault/helper/random" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/helper/template" "github.com/mitchellh/mapstructure" ) @@ -175,217 +167,3 @@ func (kg rsaKeyGenerator) configMap() (map[string]interface{}, error) { } return config, nil } - -type ClientCertificateGenerator struct { - // CommonNameTemplate is username template to be used for the client certificate common name. - CommonNameTemplate string `mapstructure:"common_name_template,omitempty"` - - // CAPrivateKey is the PEM-encoded private key for the given ca_cert. 
- CAPrivateKey string `mapstructure:"ca_private_key,omitempty"` - - // CACert is the PEM-encoded CA certificate. - CACert string `mapstructure:"ca_cert,omitempty"` - - // KeyType specifies the desired key type. - // Options include: 'rsa', 'ed25519', 'ec'. - KeyType string `mapstructure:"key_type,omitempty"` - - // KeyBits is the number of bits to use for the generated keys. - // Options include: with key_type=rsa, 2048 (default), 3072, 4096; - // With key_type=ec, allowed values are: 224, 256 (default), 384, 521; - // Ignored with key_type=ed25519. - KeyBits int `mapstructure:"key_bits,omitempty"` - - // SignatureBits is the number of bits to use in the signature algorithm. - // Options include: 256 (default), 384, 512. - SignatureBits int `mapstructure:"signature_bits,omitempty"` - - parsedCABundle *certutil.ParsedCertBundle - cnProducer template.StringTemplate -} - -// newClientCertificateGenerator returns a new ClientCertificateGenerator -// using the given config. Default values will be set on the returned -// ClientCertificateGenerator if not provided in the config. 
-func newClientCertificateGenerator(config map[string]interface{}) (ClientCertificateGenerator, error) { - var cg ClientCertificateGenerator - if err := mapstructure.WeakDecode(config, &cg); err != nil { - return cg, err - } - - switch cg.KeyType { - case "rsa": - switch cg.KeyBits { - case 0: - cg.KeyBits = 2048 - case 2048, 3072, 4096: - default: - return cg, fmt.Errorf("invalid key_bits") - } - case "ec": - switch cg.KeyBits { - case 0: - cg.KeyBits = 256 - case 224, 256, 384, 521: - default: - return cg, fmt.Errorf("invalid key_bits") - } - case "ed25519": - // key_bits ignored - default: - return cg, fmt.Errorf("invalid key_type") - } - - switch cg.SignatureBits { - case 0: - cg.SignatureBits = 256 - case 256, 384, 512: - default: - return cg, fmt.Errorf("invalid signature_bits") - } - - if cg.CommonNameTemplate == "" { - return cg, fmt.Errorf("missing required common_name_template") - } - - // Validate the common name template - t, err := template.NewTemplate(template.Template(cg.CommonNameTemplate)) - if err != nil { - return cg, fmt.Errorf("failed to create template: %w", err) - } - - _, err = t.Generate(dbplugin.UsernameMetadata{}) - if err != nil { - return cg, fmt.Errorf("invalid common_name_template: %w", err) - } - cg.cnProducer = t - - if cg.CACert == "" { - return cg, fmt.Errorf("missing required ca_cert") - } - if cg.CAPrivateKey == "" { - return cg, fmt.Errorf("missing required ca_private_key") - } - parsedBundle, err := certutil.ParsePEMBundle(strings.Join([]string{cg.CACert, cg.CAPrivateKey}, "\n")) - if err != nil { - return cg, err - } - if parsedBundle.PrivateKey == nil { - return cg, fmt.Errorf("private key not found in the PEM bundle") - } - if parsedBundle.PrivateKeyType == certutil.UnknownPrivateKey { - return cg, fmt.Errorf("unknown private key found in the PEM bundle") - } - if parsedBundle.Certificate == nil { - return cg, fmt.Errorf("certificate not found in the PEM bundle") - } - if !parsedBundle.Certificate.IsCA { - return cg, 
fmt.Errorf("the given certificate is not marked for CA use") - } - if !parsedBundle.Certificate.BasicConstraintsValid { - return cg, fmt.Errorf("the given certificate does not meet basic constraints for CA use") - } - - certBundle, err := parsedBundle.ToCertBundle() - if err != nil { - return cg, fmt.Errorf("error converting raw values into cert bundle: %w", err) - } - - parsedCABundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return cg, fmt.Errorf("failed to parse cert bundle: %w", err) - } - cg.parsedCABundle = parsedCABundle - - return cg, nil -} - -func (cg *ClientCertificateGenerator) generate(r io.Reader, expiration time.Time, userMeta dbplugin.UsernameMetadata) (*certutil.CertBundle, string, error) { - commonName, err := cg.cnProducer.Generate(userMeta) - if err != nil { - return nil, "", err - } - - // Set defaults - keyBits := cg.KeyBits - signatureBits := cg.SignatureBits - switch cg.KeyType { - case "rsa": - if keyBits == 0 { - keyBits = 2048 - } - if signatureBits == 0 { - signatureBits = 256 - } - case "ec": - if keyBits == 0 { - keyBits = 256 - } - if signatureBits == 0 { - if keyBits == 224 { - signatureBits = 256 - } else { - signatureBits = keyBits - } - } - case "ed25519": - // key_bits ignored - if signatureBits == 0 { - signatureBits = 256 - } - } - - subject := pkix.Name{ - CommonName: commonName, - // Additional subject DN options intentionally omitted for now - } - - creation := &certutil.CreationBundle{ - Params: &certutil.CreationParameters{ - Subject: subject, - KeyType: cg.KeyType, - KeyBits: cg.KeyBits, - SignatureBits: cg.SignatureBits, - NotAfter: expiration, - KeyUsage: x509.KeyUsageDigitalSignature, - ExtKeyUsage: certutil.ClientAuthExtKeyUsage, - BasicConstraintsValidForNonCA: false, - NotBeforeDuration: 30 * time.Second, - URLs: &certutil.URLEntries{ - IssuingCertificates: []string{}, - CRLDistributionPoints: []string{}, - OCSPServers: []string{}, - }, - }, - SigningBundle: &certutil.CAInfoBundle{ - 
ParsedCertBundle: *cg.parsedCABundle, - URLs: &certutil.URLEntries{ - IssuingCertificates: []string{}, - CRLDistributionPoints: []string{}, - OCSPServers: []string{}, - }, - }, - } - - parsedClientBundle, err := certutil.CreateCertificateWithRandomSource(creation, r) - if err != nil { - return nil, "", fmt.Errorf("unable to generate client certificate: %w", err) - } - - cb, err := parsedClientBundle.ToCertBundle() - if err != nil { - return nil, "", fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) - } - - return cb, subject.String(), nil -} - -// configMap returns the configuration of the ClientCertificateGenerator -// as a map from string to string. -func (cg ClientCertificateGenerator) configMap() (map[string]interface{}, error) { - config := make(map[string]interface{}) - if err := mapstructure.WeakDecode(cg, &config); err != nil { - return nil, err - } - return config, nil -} diff --git a/builtin/logical/database/credentials_test.go b/builtin/logical/database/credentials_test.go index 5e113e3b2310f..32ddc26856fef 100644 --- a/builtin/logical/database/credentials_test.go +++ b/builtin/logical/database/credentials_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package database import ( @@ -17,252 +14,6 @@ import ( "github.com/stretchr/testify/mock" ) -// Test_newClientCertificateGenerator tests the ClientCertificateGenerator struct based on the config -func Test_newClientCertificateGenerator(t *testing.T) { - type args struct { - config map[string]interface{} - } - tests := []struct { - name string - args args - want ClientCertificateGenerator - wantErr bool - }{ - { - name: "newClientCertificateGenerator with nil config", - args: args{ - config: nil, - }, - want: ClientCertificateGenerator{ - CommonNameTemplate: "", - CAPrivateKey: "", - CACert: "", - KeyType: "", - KeyBits: 0, - SignatureBits: 0, - }, - }, - { - name: "newClientCertificateGenerator with zero value key_type", - args: args{ - config: map[string]interface{}{ - "key_type": "", - }, - }, - want: ClientCertificateGenerator{ - KeyType: "", - }, - }, - { - name: "newClientCertificateGenerator with rsa value key_type", - args: args{ - config: map[string]interface{}{ - "key_type": "rsa", - }, - }, - want: ClientCertificateGenerator{ - KeyType: "rsa", - KeyBits: 2048, - SignatureBits: 256, - }, - }, - { - name: "newClientCertificateGenerator with ec value key_type", - args: args{ - config: map[string]interface{}{ - "key_type": "ec", - }, - }, - want: ClientCertificateGenerator{ - KeyType: "ec", - KeyBits: 256, - SignatureBits: 256, - }, - }, - { - name: "newClientCertificateGenerator with ed25519 value key_type", - args: args{ - config: map[string]interface{}{ - "key_type": "ed25519", - }, - }, - want: ClientCertificateGenerator{ - KeyType: "ed25519", - SignatureBits: 256, - }, - }, - { - name: "newClientCertificateGenerator with invalid key_type", - args: args{ - config: map[string]interface{}{ - "key_type": "ece", - }, - }, - wantErr: true, - }, - { - name: "newClientCertificateGenerator with zero value key_bits", - args: args{ - config: map[string]interface{}{ - "key_bits": "0", - }, - }, - want: 
ClientCertificateGenerator{ - KeyBits: 0, - }, - }, - { - name: "newClientCertificateGenerator with 2048 value key_bits", - args: args{ - config: map[string]interface{}{ - "key_bits": "2048", - }, - }, - want: ClientCertificateGenerator{ - KeyBits: 2048, - }, - }, - { - name: "newClientCertificateGenerator with 3072 value key_bits", - args: args{ - config: map[string]interface{}{ - "key_bits": "3072", - }, - }, - want: ClientCertificateGenerator{ - KeyBits: 3072, - }, - }, - { - name: "newClientCertificateGenerator with 4096 value key_bits", - args: args{ - config: map[string]interface{}{ - "key_bits": "4096", - }, - }, - want: ClientCertificateGenerator{ - KeyBits: 4096, - }, - }, - { - name: "newClientCertificateGenerator with 224 value key_bits", - args: args{ - config: map[string]interface{}{ - "key_bits": "224", - }, - }, - want: ClientCertificateGenerator{ - KeyBits: 224, - }, - }, - { - name: "newClientCertificateGenerator with 256 value key_bits", - args: args{ - config: map[string]interface{}{ - "key_bits": "256", - }, - }, - want: ClientCertificateGenerator{ - KeyBits: 256, - }, - }, - { - name: "newClientCertificateGenerator with 384 value key_bits", - args: args{ - config: map[string]interface{}{ - "key_bits": "384", - }, - }, - want: ClientCertificateGenerator{ - KeyBits: 384, - }, - }, - { - name: "newClientCertificateGenerator with 521 value key_bits", - args: args{ - config: map[string]interface{}{ - "key_bits": "521", - }, - }, - want: ClientCertificateGenerator{ - KeyBits: 521, - }, - }, - { - name: "newClientCertificateGenerator with invalid key_bits", - args: args{ - config: map[string]interface{}{ - "key_bits": "4097", - }, - }, - wantErr: true, - }, - { - name: "newClientCertificateGenerator with zero value signature_bits", - args: args{ - config: map[string]interface{}{ - "signature_bits": "0", - }, - }, - want: ClientCertificateGenerator{ - SignatureBits: 0, - }, - }, - { - name: "newClientCertificateGenerator with 256 value signature_bits", 
- args: args{ - config: map[string]interface{}{ - "signature_bits": "256", - }, - }, - want: ClientCertificateGenerator{ - SignatureBits: 256, - }, - }, - { - name: "newClientCertificateGenerator with 384 value signature_bits", - args: args{ - config: map[string]interface{}{ - "signature_bits": "384", - }, - }, - want: ClientCertificateGenerator{ - SignatureBits: 384, - }, - }, - { - name: "newClientCertificateGenerator with 512 value signature_bits", - args: args{ - config: map[string]interface{}{ - "signature_bits": "512", - }, - }, - want: ClientCertificateGenerator{ - SignatureBits: 512, - }, - }, - { - name: "newClientCertificateGenerator with invalid signature_bits", - args: args{ - config: map[string]interface{}{ - "signature_bits": "612", - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := newClientCertificateGenerator(tt.args.config) - if tt.wantErr { - assert.Error(t, err) - return - } - assert.Equal(t, tt.want, got) - }) - } -} - func Test_newPasswordGenerator(t *testing.T) { type args struct { config map[string]interface{} diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go index 2b5f7a981ec36..bea9e30ec7a58 100644 --- a/builtin/logical/database/dbplugin/plugin_test.go +++ b/builtin/logical/database/dbplugin/plugin_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin_test import ( diff --git a/builtin/logical/database/mocks_test.go b/builtin/logical/database/mocks_test.go index afb1bbc79f68a..13eb530061426 100644 --- a/builtin/logical/database/mocks_test.go +++ b/builtin/logical/database/mocks_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/mockv4.go b/builtin/logical/database/mockv4.go index a85f307ec49d6..4f0b181683a85 100644 --- a/builtin/logical/database/mockv4.go +++ b/builtin/logical/database/mockv4.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/mockv5.go b/builtin/logical/database/mockv5.go index fecccfed209f5..632cfb38e037f 100644 --- a/builtin/logical/database/mockv5.go +++ b/builtin/logical/database/mockv5.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/path_config_connection.go b/builtin/logical/database/path_config_connection.go index b869facef0dec..9f1ad4cf5744d 100644 --- a/builtin/logical/database/path_config_connection.go +++ b/builtin/logical/database/path_config_connection.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package database import ( @@ -62,13 +59,6 @@ func (c *DatabaseConfig) SupportsCredentialType(credentialType v5.CredentialType func pathResetConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("reset/%s", framework.GenericNameRegex("name")), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - OperationVerb: "reset", - OperationSuffix: "connection", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -113,11 +103,6 @@ func (b *databaseBackend) pathConnectionReset() framework.OperationFunc { func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("config/%s", framework.GenericNameRegex("name")), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -164,36 +149,11 @@ func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { }, ExistenceCheck: b.connectionExistenceCheck(), - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.CreateOperation: &framework.PathOperation{ - Callback: b.connectionWriteHandler(), - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "connection", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.connectionWriteHandler(), - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "connection", - }, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.connectionReadHandler(), - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "read", - OperationSuffix: "connection-configuration", - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.connectionDeleteHandler(), - DisplayAttrs: &framework.DisplayAttributes{ - 
OperationVerb: "delete", - OperationSuffix: "connection-configuration", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.CreateOperation: b.connectionWriteHandler(), + logical.UpdateOperation: b.connectionWriteHandler(), + logical.ReadOperation: b.connectionReadHandler(), + logical.DeleteOperation: b.connectionDeleteHandler(), }, HelpSynopsis: pathConfigConnectionHelpSyn, @@ -221,11 +181,6 @@ func pathListPluginConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("config/?$"), - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - OperationSuffix: "connections", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.connectionListHandler(), }, diff --git a/builtin/logical/database/path_config_connection_test.go b/builtin/logical/database/path_config_connection_test.go index 8cf06062890f5..18f850dbce925 100644 --- a/builtin/logical/database/path_config_connection_test.go +++ b/builtin/logical/database/path_config_connection_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/path_creds_create.go b/builtin/logical/database/path_creds_create.go index a0fe6a38456c9..e57516259fefb 100644 --- a/builtin/logical/database/path_creds_create.go +++ b/builtin/logical/database/path_creds_create.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package database import ( @@ -9,7 +6,6 @@ import ( "time" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" @@ -19,13 +15,6 @@ func pathCredsCreate(b *databaseBackend) []*framework.Path { return []*framework.Path{ { Pattern: "creds/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - OperationVerb: "generate", - OperationSuffix: "credentials", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -42,13 +31,6 @@ func pathCredsCreate(b *databaseBackend) []*framework.Path { }, { Pattern: "static-creds/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - OperationVerb: "read", - OperationSuffix: "static-role-credentials", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -172,27 +154,6 @@ func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { // Set output credential respData["rsa_private_key"] = string(private) - case v5.CredentialTypeClientCertificate: - generator, err := newClientCertificateGenerator(role.CredentialConfig) - if err != nil { - return nil, fmt.Errorf("failed to construct credential generator: %s", err) - } - - // Generate the client certificate - cb, subject, err := generator.generate(b.GetRandomReader(), expiration, - newUserReq.UsernameConfig) - if err != nil { - return nil, fmt.Errorf("failed to generate client certificate: %w", err) - } - - // Set input credential - newUserReq.CredentialType = dbplugin.CredentialTypeClientCertificate - newUserReq.Subject = subject - - // Set output credential - respData["client_certificate"] = cb.Certificate - 
respData["private_key"] = cb.PrivateKey - respData["private_key_type"] = cb.PrivateKeyType } // Overwriting the password in the event this is a legacy database diff --git a/builtin/logical/database/path_roles.go b/builtin/logical/database/path_roles.go index ba4aa7f20e4b3..02a199c5326d8 100644 --- a/builtin/logical/database/path_roles.go +++ b/builtin/logical/database/path_roles.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( @@ -24,12 +21,6 @@ func pathListRoles(b *databaseBackend) []*framework.Path { { Pattern: "roles/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - OperationVerb: "list", - OperationSuffix: "roles", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -40,12 +31,6 @@ func pathListRoles(b *databaseBackend) []*framework.Path { { Pattern: "static-roles/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - OperationVerb: "list", - OperationSuffix: "static-roles", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -59,11 +44,7 @@ func pathListRoles(b *databaseBackend) []*framework.Path { func pathRoles(b *databaseBackend) []*framework.Path { return []*framework.Path{ { - Pattern: "roles/" + framework.GenericNameRegex("name"), - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - OperationSuffix: "role", - }, + Pattern: "roles/" + framework.GenericNameRegex("name"), Fields: fieldsForType(databaseRolePath), ExistenceCheck: b.pathRoleExistenceCheck, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -78,11 +59,7 @@ func pathRoles(b *databaseBackend) []*framework.Path { }, { - Pattern: "static-roles/" + framework.GenericNameRegex("name"), - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - 
OperationSuffix: "static-role", - }, + Pattern: "static-roles/" + framework.GenericNameRegex("name"), Fields: fieldsForType(databaseStaticRolePath), ExistenceCheck: b.pathStaticRoleExistenceCheck, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -651,8 +628,6 @@ func (r *roleEntry) setCredentialType(credentialType string) error { r.CredentialType = v5.CredentialTypePassword case v5.CredentialTypeRSAPrivateKey.String(): r.CredentialType = v5.CredentialTypeRSAPrivateKey - case v5.CredentialTypeClientCertificate.String(): - r.CredentialType = v5.CredentialTypeClientCertificate default: return fmt.Errorf("invalid credential_type %q", credentialType) } @@ -694,18 +669,6 @@ func (r *roleEntry) setCredentialConfig(config map[string]string) error { if len(cm) > 0 { r.CredentialConfig = cm } - case v5.CredentialTypeClientCertificate: - generator, err := newClientCertificateGenerator(c) - if err != nil { - return err - } - cm, err := generator.configMap() - if err != nil { - return err - } - if len(cm) > 0 { - r.CredentialConfig = cm - } } return nil diff --git a/builtin/logical/database/path_roles_test.go b/builtin/logical/database/path_roles_test.go index dc2eddeb1a04f..bfb20633896ef 100644 --- a/builtin/logical/database/path_roles_test.go +++ b/builtin/logical/database/path_roles_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go index 49081a3c3f404..2d5fa7a8f9aea 100644 --- a/builtin/logical/database/path_rotate_credentials.go +++ b/builtin/logical/database/path_rotate_credentials.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package database import ( @@ -19,13 +16,6 @@ func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { return []*framework.Path{ { Pattern: "rotate-root/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - OperationVerb: "rotate", - OperationSuffix: "root-credentials", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -46,13 +36,6 @@ func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { }, { Pattern: "rotate-role/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixDatabase, - OperationVerb: "rotate", - OperationSuffix: "static-role-credentials", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/database/rollback.go b/builtin/logical/database/rollback.go index 22ce6168663e0..a9810e816643f 100644 --- a/builtin/logical/database/rollback.go +++ b/builtin/logical/database/rollback.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/rollback_test.go b/builtin/logical/database/rollback_test.go index 8f36fe26a7957..dc061ae99a3fb 100644 --- a/builtin/logical/database/rollback_test.go +++ b/builtin/logical/database/rollback_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/rotation.go b/builtin/logical/database/rotation.go index 1ef54aecac326..5ae2756f279cd 100644 --- a/builtin/logical/database/rotation.go +++ b/builtin/logical/database/rotation.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/rotation_test.go b/builtin/logical/database/rotation_test.go index e0cb96dd67cfb..1fdbba55f1d6d 100644 --- a/builtin/logical/database/rotation_test.go +++ b/builtin/logical/database/rotation_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( @@ -509,6 +506,8 @@ func TestBackend_Static_QueueWAL_discard_role_not_found(t *testing.T) { // Second scenario, WAL contains a role name that does exist, but the role's // LastVaultRotation is greater than the WAL has func TestBackend_Static_QueueWAL_discard_role_newer_rotation_date(t *testing.T) { + t.Skip("temporarily disabled due to intermittent failures") + cluster, sys := getCluster(t) defer cluster.Cleanup() diff --git a/builtin/logical/database/secret_creds.go b/builtin/logical/database/secret_creds.go index fefa452a5d356..9c9b348e2437a 100644 --- a/builtin/logical/database/secret_creds.go +++ b/builtin/logical/database/secret_creds.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/version_wrapper.go b/builtin/logical/database/version_wrapper.go index daab17964d3e2..8c4db1388861c 100644 --- a/builtin/logical/database/version_wrapper.go +++ b/builtin/logical/database/version_wrapper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/version_wrapper_test.go b/builtin/logical/database/version_wrapper_test.go index 95a5f7b6fa953..054241f978a32 100644 --- a/builtin/logical/database/version_wrapper_test.go +++ b/builtin/logical/database/version_wrapper_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package database import ( diff --git a/builtin/logical/database/versioning_large_test.go b/builtin/logical/database/versioning_large_test.go index b39ddb7e1ce4a..a9f7efde62a68 100644 --- a/builtin/logical/database/versioning_large_test.go +++ b/builtin/logical/database/versioning_large_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package database // This file contains all "large"/expensive tests. These are running requests against a running backend diff --git a/builtin/logical/mongodb/backend.go b/builtin/logical/mongodb/backend.go new file mode 100644 index 0000000000000..a9374243b86c1 --- /dev/null +++ b/builtin/logical/mongodb/backend.go @@ -0,0 +1,144 @@ +package mongodb + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + mgo "gopkg.in/mgo.v2" +) + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *framework.Backend { + var b backend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + + Paths: []*framework.Path{ + pathConfigConnection(&b), + pathConfigLease(&b), + pathListRoles(&b), + pathRoles(&b), + pathCredsCreate(&b), + }, + + Secrets: []*framework.Secret{ + secretCreds(&b), + }, + + Clean: b.ResetSession, + + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, + } + + return b.Backend +} + +type backend struct { + *framework.Backend + + session *mgo.Session + lock sync.Mutex +} + +// Session returns the database connection. 
+func (b *backend) Session(ctx context.Context, s logical.Storage) (*mgo.Session, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.session != nil { + if err := b.session.Ping(); err == nil { + return b.session, nil + } + b.session.Close() + } + + connConfigJSON, err := s.Get(ctx, "config/connection") + if err != nil { + return nil, err + } + if connConfigJSON == nil { + return nil, fmt.Errorf("configure the MongoDB connection with config/connection first") + } + + var connConfig connectionConfig + if err := connConfigJSON.DecodeJSON(&connConfig); err != nil { + return nil, err + } + + dialInfo, err := parseMongoURI(connConfig.URI) + if err != nil { + return nil, err + } + + b.session, err = mgo.DialWithInfo(dialInfo) + if err != nil { + return nil, err + } + b.session.SetSyncTimeout(1 * time.Minute) + b.session.SetSocketTimeout(1 * time.Minute) + + return b.session, nil +} + +// ResetSession forces creation of a new connection next time Session() is called. +func (b *backend) ResetSession(_ context.Context) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.session != nil { + b.session.Close() + } + + b.session = nil +} + +func (b *backend) invalidate(ctx context.Context, key string) { + switch key { + case "config/connection": + b.ResetSession(ctx) + } +} + +// LeaseConfig returns the lease configuration +func (b *backend) LeaseConfig(ctx context.Context, s logical.Storage) (*configLease, error) { + entry, err := s.Get(ctx, "config/lease") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +const backendHelp = ` +The mongodb backend dynamically generates MongoDB credentials. + +After mounting this backend, configure it using the endpoints within +the "config/" path. 
+` diff --git a/builtin/logical/mongodb/backend_test.go b/builtin/logical/mongodb/backend_test.go new file mode 100644 index 0000000000000..43cee7de981ca --- /dev/null +++ b/builtin/logical/mongodb/backend_test.go @@ -0,0 +1,268 @@ +package mongodb + +import ( + "context" + "fmt" + "log" + "strings" + "sync" + "testing" + + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/helper/testhelpers/mongodb" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +var testImagePull sync.Once + +func TestBackend_config_connection(t *testing.T) { + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "uri": "sample_connection_uri", + "verify_connection": false, + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + configReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } +} + +func TestBackend_basic(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURI := mongodb.PrepareTestContainer(t, "5.0.10") + defer cleanup() + connData := map[string]interface{}{ + "uri": connURI, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(connData, false), + 
testAccStepRole(), + testAccStepReadCreds("web"), + }, + }) +} + +func TestBackend_roleCrud(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURI := mongodb.PrepareTestContainer(t, "5.0.10") + defer cleanup() + connData := map[string]interface{}{ + "uri": connURI, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(connData, false), + testAccStepRole(), + testAccStepReadRole("web", testDb, testMongoDBRoles), + testAccStepDeleteRole("web"), + testAccStepReadRole("web", "", ""), + }, + }) +} + +func TestBackend_leaseWriteRead(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURI := mongodb.PrepareTestContainer(t, "5.0.10") + defer cleanup() + connData := map[string]interface{}{ + "uri": connURI, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(connData, false), + testAccStepWriteLease(), + testAccStepReadLease(), + }, + }) +} + +func testAccStepConfig(d map[string]interface{}, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Data: d, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if expectError { + if resp.Data == nil { + return fmt.Errorf("data is nil") + } + var e struct { + Error string `mapstructure:"error"` + } + if err := mapstructure.Decode(resp.Data, &e); err != nil { + return err + } + if len(e.Error) == 0 { + return fmt.Errorf("expected error, but write succeeded") + } + return nil + } else if resp != nil && resp.IsError() { + return fmt.Errorf("got an error response: %v", resp.Error()) + } + 
return nil + }, + } +} + +func testAccStepRole() logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/web", + Data: map[string]interface{}{ + "db": testDb, + "roles": testMongoDBRoles, + }, + } +} + +func testAccStepDeleteRole(n string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "roles/" + n, + } +} + +func testAccStepReadCreds(name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "creds/" + name, + Check: func(resp *logical.Response) error { + var d struct { + DB string `mapstructure:"db"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if d.DB == "" { + return fmt.Errorf("bad: %#v", resp) + } + if d.Username == "" { + return fmt.Errorf("bad: %#v", resp) + } + if !strings.HasPrefix(d.Username, "vault-root-") { + return fmt.Errorf("bad: %#v", resp) + } + if d.Password == "" { + return fmt.Errorf("bad: %#v", resp) + } + + log.Printf("[WARN] Generated credentials: %v", d) + + return nil + }, + } +} + +func testAccStepReadRole(name, db, mongoDBRoles string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if db == "" && mongoDBRoles == "" { + return nil + } + + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + DB string `mapstructure:"db"` + MongoDBRoles string `mapstructure:"roles"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if d.DB != db { + return fmt.Errorf("bad: %#v", resp) + } + if d.MongoDBRoles != mongoDBRoles { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +func testAccStepWriteLease() logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: 
logical.UpdateOperation, + Path: "config/lease", + Data: map[string]interface{}{ + "ttl": "1h5m", + "max_ttl": "24h", + }, + } +} + +func testAccStepReadLease() logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "config/lease", + Check: func(resp *logical.Response) error { + if resp.Data["ttl"].(float64) != 3900 || resp.Data["max_ttl"].(float64) != 86400 { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +const ( + testDb = "foo" + testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]` +) diff --git a/builtin/logical/mongodb/cmd/mongodb/main.go b/builtin/logical/mongodb/cmd/mongodb/main.go new file mode 100644 index 0000000000000..619f8e3a5a40e --- /dev/null +++ b/builtin/logical/mongodb/cmd/mongodb/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/mongodb" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.Serve(&plugin.ServeOpts{ + BackendFactoryFunc: mongodb.Factory, + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/mongodb/path_config_connection.go b/builtin/logical/mongodb/path_config_connection.go new file mode 100644 index 0000000000000..7f7360729a1dc --- /dev/null +++ b/builtin/logical/mongodb/path_config_connection.go @@ -0,0 +1,112 @@ +package mongodb + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + mgo "gopkg.in/mgo.v2" +) + +func pathConfigConnection(b *backend) *framework.Path { + 
return &framework.Path{ + Pattern: "config/connection", + Fields: map[string]*framework.FieldSchema{ + "uri": { + Type: framework.TypeString, + Description: "MongoDB standard connection string (URI)", + }, + "verify_connection": { + Type: framework.TypeBool, + Default: true, + Description: `If set, uri is verified by actually connecting to the database`, + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathConnectionRead, + logical.UpdateOperation: b.pathConnectionWrite, + }, + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +// pathConnectionRead reads out the connection configuration +func (b *backend) pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entry, err := req.Storage.Get(ctx, "config/connection") + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration") + } + if entry == nil { + return nil, nil + } + + return nil, nil +} + +func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + uri := data.Get("uri").(string) + if uri == "" { + return logical.ErrorResponse("uri parameter is required"), nil + } + + dialInfo, err := parseMongoURI(uri) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("invalid uri: %s", err)), nil + } + + // Don't check the config if verification is disabled + verifyConnection := data.Get("verify_connection").(bool) + if verifyConnection { + // Verify the config + session, err := mgo.DialWithInfo(dialInfo) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error validating connection info: %s", err)), nil + } + defer session.Close() + if err := session.Ping(); err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error validating connection info: %s", err)), nil + } + } + + // Store it + entry, err := 
logical.StorageEntryJSON("config/connection", connectionConfig{ + URI: uri, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + // Reset the Session + b.ResetSession(ctx) + + resp := &logical.Response{} + resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection URI as it is, including passwords, if any.") + + return resp, nil +} + +type connectionConfig struct { + URI string `json:"uri" structs:"uri" mapstructure:"uri"` +} + +const pathConfigConnectionHelpSyn = ` +Configure the connection string to talk to MongoDB. +` + +const pathConfigConnectionHelpDesc = ` +This path configures the standard connection string (URI) used to connect to MongoDB. + +A MongoDB URI looks like: +"mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]" + +See https://docs.mongodb.org/manual/reference/connection-string/ for detailed documentation of the URI format. + +When configuring the connection string, the backend will verify its validity. 
+` diff --git a/builtin/logical/mongodb/path_config_lease.go b/builtin/logical/mongodb/path_config_lease.go new file mode 100644 index 0000000000000..c64a4d1d89a7f --- /dev/null +++ b/builtin/logical/mongodb/path_config_lease.go @@ -0,0 +1,89 @@ +package mongodb + +import ( + "context" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigLease(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/lease", + Fields: map[string]*framework.FieldSchema{ + "ttl": { + Type: framework.TypeDurationSecond, + Description: "Default ttl for credentials.", + }, + + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: "Maximum time a set of credentials can be valid for.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathConfigLeaseRead, + logical.UpdateOperation: b.pathConfigLeaseWrite, + }, + + HelpSynopsis: pathConfigLeaseHelpSyn, + HelpDescription: pathConfigLeaseHelpDesc, + } +} + +func (b *backend) pathConfigLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entry, err := logical.StorageEntryJSON("config/lease", &configLease{ + TTL: time.Second * time.Duration(d.Get("ttl").(int)), + MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)), + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathConfigLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + leaseConfig, err := b.LeaseConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + if leaseConfig == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "ttl": leaseConfig.TTL.Seconds(), + "max_ttl": leaseConfig.MaxTTL.Seconds(), + }, + }, nil +} + +type configLease struct { + TTL time.Duration + 
MaxTTL time.Duration +} + +const pathConfigLeaseHelpSyn = ` +Configure the default lease TTL settings for credentials +generated by the mongodb backend. +` + +const pathConfigLeaseHelpDesc = ` +This configures the default lease TTL settings used for +credentials generated by this backend. The ttl specifies the +duration that a set of credentials will be valid for before +the lease must be renewed (if it is renewable), while the +max_ttl specifies the overall maximum duration that the +credentials will be valid regardless of lease renewals. + +The format for the TTL values is an integer and then unit. For +example, the value "1h" specifies a 1-hour TTL. The longest +supported unit is hours. +` diff --git a/builtin/logical/mongodb/path_creds_create.go b/builtin/logical/mongodb/path_creds_create.go new file mode 100644 index 0000000000000..ca65334086696 --- /dev/null +++ b/builtin/logical/mongodb/path_creds_create.go @@ -0,0 +1,119 @@ +package mongodb + +import ( + "context" + "fmt" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathCredsCreate(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role to generate credentials for.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathCredsCreateRead, + }, + + HelpSynopsis: pathCredsCreateReadHelpSyn, + HelpDescription: pathCredsCreateReadHelpDesc, + } +} + +func (b *backend) pathCredsCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Get the role + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown role: 
%s", name)), nil + } + + // Determine if we have a lease configuration + leaseConfig, err := b.LeaseConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + if leaseConfig == nil { + leaseConfig = &configLease{} + } + + // Generate the username and password + displayName := req.DisplayName + if displayName != "" { + displayName += "-" + } + + userUUID, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + username := fmt.Sprintf("vault-%s%s", displayName, userUUID) + + password, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + // Build the user creation command + createUserCmd := createUserCommand{ + Username: username, + Password: password, + Roles: role.MongoDBRoles.toStandardRolesArray(), + } + + // Get our connection + session, err := b.Session(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Create the user + err = session.DB(role.DB).Run(createUserCmd, nil) + if err != nil { + return nil, err + } + + // Return the secret + resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ + "db": role.DB, + "username": username, + "password": password, + }, map[string]interface{}{ + "username": username, + "db": role.DB, + }) + resp.Secret.TTL = leaseConfig.TTL + resp.Secret.MaxTTL = leaseConfig.MaxTTL + + return resp, nil +} + +type createUserCommand struct { + Username string `bson:"createUser"` + Password string `bson:"pwd"` + Roles []interface{} `bson:"roles"` +} + +const pathCredsCreateReadHelpSyn = ` +Request MongoDB database credentials for a particular role. +` + +const pathCredsCreateReadHelpDesc = ` +This path reads generates MongoDB database credentials for +a particular role. The database credentials will be +generated on demand and will be automatically revoked when +the lease is up. 
+` diff --git a/builtin/logical/mongodb/path_roles.go b/builtin/logical/mongodb/path_roles.go new file mode 100644 index 0000000000000..9bea9e4d4aaa7 --- /dev/null +++ b/builtin/logical/mongodb/path_roles.go @@ -0,0 +1,224 @@ +package mongodb + +import ( + "context" + "encoding/json" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + "db": { + Type: framework.TypeString, + Description: "Name of the authentication database for users generated for this role.", + }, + "roles": { + Type: framework.TypeString, + Description: "MongoDB roles to assign to the users generated for this role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleRead, + logical.UpdateOperation: b.pathRoleCreate, + logical.DeleteOperation: b.pathRoleDelete, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func (b *backend) Role(ctx context.Context, s logical.Storage, n string) (*roleStorageEntry, error) { + entry, err := s.Get(ctx, "role/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleStorageEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, 
"role/"+data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + rolesJsonBytes, err := json.Marshal(role.MongoDBRoles.toStandardRolesArray()) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "db": role.DB, + "roles": string(rolesJsonBytes), + }, + }, nil +} + +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse("Missing name"), nil + } + + roleDB := data.Get("db").(string) + if roleDB == "" { + return logical.ErrorResponse("db parameter is required"), nil + } + + // Example roles JSON: + // + // [ "readWrite", { "role": "readWrite", "db": "test" } ] + // + // For storage, we convert such an array into a homogeneous array of role documents like: + // + // [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ] + // + var roles []mongodbRole + rolesJson := []byte(data.Get("roles").(string)) + if len(rolesJson) > 0 { + var rolesArray []interface{} + err := json.Unmarshal(rolesJson, &rolesArray) + if err != nil { + return nil, err + } + for _, rawRole := range rolesArray { + switch role := rawRole.(type) { + case string: + roles = append(roles, mongodbRole{Role: role}) + case map[string]interface{}: + if db, ok := role["db"].(string); ok { + if roleName, ok := 
role["role"].(string); ok { + roles = append(roles, mongodbRole{Role: roleName, DB: db}) + } + } + } + } + } + + // Store it + entry, err := logical.StorageEntryJSON("role/"+name, &roleStorageEntry{ + DB: roleDB, + MongoDBRoles: roles, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +func (roles mongodbRoles) toStandardRolesArray() []interface{} { + // Convert array of role documents like: + // + // [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ] + // + // into a "standard" MongoDB roles array containing both strings and role documents: + // + // [ "readWrite", { "role": "readWrite", "db": "test" } ] + // + // MongoDB's createUser command accepts the latter. + // + var standardRolesArray []interface{} + for _, role := range roles { + if role.DB == "" { + standardRolesArray = append(standardRolesArray, role.Role) + } else { + standardRolesArray = append(standardRolesArray, role) + } + } + return standardRolesArray +} + +type roleStorageEntry struct { + DB string `json:"db"` + MongoDBRoles mongodbRoles `json:"roles"` +} + +type mongodbRole struct { + Role string `json:"role" bson:"role"` + DB string `json:"db" bson:"db"` +} + +type mongodbRoles []mongodbRole + +const pathRoleHelpSyn = ` +Manage the roles used to generate MongoDB credentials. +` + +const pathRoleHelpDesc = ` +This path lets you manage the roles used to generate MongoDB credentials. + +The "db" parameter specifies the authentication database for users +generated for a given role. + +The "roles" parameter specifies the MongoDB roles that should be assigned +to users created for a given role. Just like when creating a user directly +using db.createUser, the roles JSON array can specify both built-in roles +and user-defined roles for both the database the user is created in and +for other databases. 
+ +For example, the following roles JSON array grants the "readWrite" +permission on both the user's authentication database and the "test" +database: + +[ "readWrite", { "role": "readWrite", "db": "test" } ] + +Please consult the MongoDB documentation for more +details on Role-Based Access Control in MongoDB. +` diff --git a/builtin/logical/mongodb/secret_creds.go b/builtin/logical/mongodb/secret_creds.go new file mode 100644 index 0000000000000..4e5e09fc24279 --- /dev/null +++ b/builtin/logical/mongodb/secret_creds.go @@ -0,0 +1,84 @@ +package mongodb + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + mgo "gopkg.in/mgo.v2" +) + +const SecretCredsType = "creds" + +func secretCreds(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretCredsType, + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "Username", + }, + + "password": { + Type: framework.TypeString, + Description: "Password", + }, + }, + + Renew: b.secretCredsRenew, + Revoke: b.secretCredsRevoke, + } +} + +func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the lease information + leaseConfig, err := b.LeaseConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + if leaseConfig == nil { + leaseConfig = &configLease{} + } + + resp := &logical.Response{Secret: req.Secret} + resp.Secret.TTL = leaseConfig.TTL + resp.Secret.MaxTTL = leaseConfig.MaxTTL + return resp, nil +} + +func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the username from the internal data + usernameRaw, ok := req.Secret.InternalData["username"] + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + username, ok := usernameRaw.(string) + if !ok { + return nil, 
fmt.Errorf("username internal data is not a string") + } + + // Get the db from the internal data + dbRaw, ok := req.Secret.InternalData["db"] + if !ok { + return nil, fmt.Errorf("secret is missing db internal data") + } + db, ok := dbRaw.(string) + if !ok { + return nil, fmt.Errorf("db internal data is not a string") + } + + // Get our connection + session, err := b.Session(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Drop the user + err = session.DB(db).RemoveUser(username) + if err != nil && err != mgo.ErrNotFound { + return nil, err + } + + return nil, nil +} diff --git a/builtin/logical/mongodb/util.go b/builtin/logical/mongodb/util.go new file mode 100644 index 0000000000000..1880bbd40b41d --- /dev/null +++ b/builtin/logical/mongodb/util.go @@ -0,0 +1,81 @@ +package mongodb + +import ( + "crypto/tls" + "errors" + "net" + "net/url" + "strconv" + "strings" + "time" + + mgo "gopkg.in/mgo.v2" +) + +// Unfortunately, mgo doesn't support the ssl parameter in its MongoDB URI parsing logic, so we have to handle that +// ourselves. 
See https://github.com/go-mgo/mgo/issues/84 +func parseMongoURI(rawUri string) (*mgo.DialInfo, error) { + uri, err := url.Parse(rawUri) + if err != nil { + return nil, err + } + + info := mgo.DialInfo{ + Addrs: strings.Split(uri.Host, ","), + Database: strings.TrimPrefix(uri.Path, "/"), + Timeout: 10 * time.Second, + } + + if uri.User != nil { + info.Username = uri.User.Username() + info.Password, _ = uri.User.Password() + } + + query := uri.Query() + for key, values := range query { + var value string + if len(values) > 0 { + value = values[0] + } + + switch key { + case "authSource": + info.Source = value + case "authMechanism": + info.Mechanism = value + case "gssapiServiceName": + info.Service = value + case "replicaSet": + info.ReplicaSetName = value + case "maxPoolSize": + poolLimit, err := strconv.Atoi(value) + if err != nil { + return nil, errors.New("bad value for maxPoolSize: " + value) + } + info.PoolLimit = poolLimit + case "ssl": + ssl, err := strconv.ParseBool(value) + if err != nil { + return nil, errors.New("bad value for ssl: " + value) + } + if ssl { + info.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) { + return tls.Dial("tcp", addr.String(), &tls.Config{}) + } + } + case "connect": + if value == "direct" { + info.Direct = true + break + } + if value == "replicaSet" { + break + } + fallthrough + default: + return nil, errors.New("unsupported connection URL option: " + key + "=" + value) + } + } + + return &info, nil +} diff --git a/builtin/logical/mssql/backend.go b/builtin/logical/mssql/backend.go new file mode 100644 index 0000000000000..b06a7ff96c7f7 --- /dev/null +++ b/builtin/logical/mssql/backend.go @@ -0,0 +1,160 @@ +package mssql + +import ( + "context" + "database/sql" + "fmt" + "strings" + "sync" + + _ "github.com/denisenkom/go-mssqldb" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := 
Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + + Paths: []*framework.Path{ + pathConfigConnection(&b), + pathConfigLease(&b), + pathListRoles(&b), + pathRoles(&b), + pathCredsCreate(&b), + }, + + Secrets: []*framework.Secret{ + secretCreds(&b), + }, + + Invalidate: b.invalidate, + Clean: b.ResetDB, + BackendType: logical.TypeLogical, + } + + return &b +} + +type backend struct { + *framework.Backend + + db *sql.DB + defaultDb string + lock sync.Mutex +} + +// DB returns the default database connection. +func (b *backend) DB(ctx context.Context, s logical.Storage) (*sql.DB, error) { + b.lock.Lock() + defer b.lock.Unlock() + + // If we already have a DB, we got it! + if b.db != nil { + if err := b.db.Ping(); err == nil { + return b.db, nil + } + // If the ping was unsuccessful, close it and ignore errors as we'll be + // reestablishing anyways + b.db.Close() + } + + // Otherwise, attempt to make connection + entry, err := s.Get(ctx, "config/connection") + if err != nil { + return nil, err + } + if entry == nil { + return nil, fmt.Errorf("configure the DB connection with config/connection first") + } + + var connConfig connectionConfig + if err := entry.DecodeJSON(&connConfig); err != nil { + return nil, err + } + connString := connConfig.ConnectionString + + db, err := sql.Open("sqlserver", connString) + if err != nil { + return nil, err + } + + // Set some connection pool settings. We don't need much of this, + // since the request rate shouldn't be high. 
+ db.SetMaxOpenConns(connConfig.MaxOpenConnections) + + stmt, err := db.Prepare("SELECT db_name();") + if err != nil { + return nil, err + } + defer stmt.Close() + + err = stmt.QueryRow().Scan(&b.defaultDb) + if err != nil { + return nil, err + } + + b.db = db + return b.db, nil +} + +// ResetDB forces a connection next time DB() is called. +func (b *backend) ResetDB(_ context.Context) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.db != nil { + b.db.Close() + } + + b.db = nil +} + +func (b *backend) invalidate(ctx context.Context, key string) { + switch key { + case "config/connection": + b.ResetDB(ctx) + } +} + +// LeaseConfig returns the lease configuration +func (b *backend) LeaseConfig(ctx context.Context, s logical.Storage) (*configLease, error) { + entry, err := s.Get(ctx, "config/lease") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +const backendHelp = ` +The MSSQL backend dynamically generates database users. + +After mounting this backend, configure it using the endpoints within +the "config/" path. + +This backend does not support Azure SQL Databases. 
+` diff --git a/builtin/logical/mssql/backend_test.go b/builtin/logical/mssql/backend_test.go new file mode 100644 index 0000000000000..afb239c2d3fb5 --- /dev/null +++ b/builtin/logical/mssql/backend_test.go @@ -0,0 +1,222 @@ +package mssql + +import ( + "context" + "fmt" + "log" + "reflect" + "testing" + + _ "github.com/denisenkom/go-mssqldb" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + mssqlhelper "github.com/hashicorp/vault/helper/testhelpers/mssql" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +func Backend_config_connection(t *testing.T) { + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "connection_string": "sample_connection_string", + "max_open_connections": 7, + "verify_connection": false, + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + configReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(configData, "verify_connection") + delete(configData, "connection_string") + if !reflect.DeepEqual(configData, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data) + } +} + +func TestBackend_basic(t *testing.T) { + b, _ := Factory(context.Background(), logical.TestBackendConfig()) + + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + PreCheck: 
testAccPreCheckFunc(t, connURL), + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connURL), + testAccStepRole(t), + testAccStepReadCreds(t, "web"), + }, + }) +} + +func TestBackend_roleCrud(t *testing.T) { + b := Backend() + + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + PreCheck: testAccPreCheckFunc(t, connURL), + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connURL), + testAccStepRole(t), + testAccStepReadRole(t, "web", testRoleSQL), + testAccStepDeleteRole(t, "web"), + testAccStepReadRole(t, "web", ""), + }, + }) +} + +func TestBackend_leaseWriteRead(t *testing.T) { + b := Backend() + + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + PreCheck: testAccPreCheckFunc(t, connURL), + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connURL), + testAccStepWriteLease(t), + testAccStepReadLease(t), + }, + }) +} + +func testAccPreCheckFunc(t *testing.T, connectionURL string) func() { + return func() { + if connectionURL == "" { + t.Fatal("connection URL must be set for acceptance tests") + } + } +} + +func testAccStepConfig(t *testing.T, connURL string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Data: map[string]interface{}{ + "connection_string": connURL, + }, + } +} + +func testAccStepRole(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/web", + Data: map[string]interface{}{ + "sql": testRoleSQL, + }, + } +} + +func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "roles/" + n, + } +} + +func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep { + return 
logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "creds/" + name, + Check: func(resp *logical.Response) error { + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + log.Printf("[WARN] Generated credentials: %v", d) + + return nil + }, + } +} + +func testAccStepReadRole(t *testing.T, name, sql string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if sql == "" { + return nil + } + + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + SQL string `mapstructure:"sql"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if d.SQL != sql { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +func testAccStepWriteLease(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/lease", + Data: map[string]interface{}{ + "ttl": "1h5m", + "max_ttl": "24h", + }, + } +} + +func testAccStepReadLease(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "config/lease", + Check: func(resp *logical.Response) error { + if resp.Data["ttl"] != "1h5m0s" || resp.Data["max_ttl"] != "24h0m0s" { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +const testRoleSQL = ` +CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; +CREATE USER [{{name}}] FOR LOGIN [{{name}}]; +GRANT SELECT ON SCHEMA::dbo TO [{{name}}] +` diff --git a/builtin/logical/mssql/cmd/mssql/main.go b/builtin/logical/mssql/cmd/mssql/main.go new file mode 100644 index 0000000000000..0db9c1c982039 --- /dev/null +++ b/builtin/logical/mssql/cmd/mssql/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "os" + + hclog 
"github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/mssql" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.Serve(&plugin.ServeOpts{ + BackendFactoryFunc: mssql.Factory, + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/mssql/path_config_connection.go b/builtin/logical/mssql/path_config_connection.go new file mode 100644 index 0000000000000..f0ad63108e814 --- /dev/null +++ b/builtin/logical/mssql/path_config_connection.go @@ -0,0 +1,126 @@ +package mssql + +import ( + "context" + "database/sql" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigConnection(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/connection", + Fields: map[string]*framework.FieldSchema{ + "connection_string": { + Type: framework.TypeString, + Description: "DB connection parameters", + }, + "max_open_connections": { + Type: framework.TypeInt, + Description: "Maximum number of open connections to database", + }, + "verify_connection": { + Type: framework.TypeBool, + Default: true, + Description: "If set, connection_string is verified by actually connecting to the database", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConnectionWrite, + logical.ReadOperation: b.pathConnectionRead, + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +// pathConnectionRead reads out the connection configuration +func (b *backend) 
pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entry, err := req.Storage.Get(ctx, "config/connection") + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration") + } + if entry == nil { + return nil, nil + } + + var config connectionConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "max_open_connections": config.MaxOpenConnections, + }, + }, nil +} + +// pathConnectionWrite stores the connection configuration +func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + connString := data.Get("connection_string").(string) + + maxOpenConns := data.Get("max_open_connections").(int) + if maxOpenConns == 0 { + maxOpenConns = 2 + } + + // Don't check the connection_string if verification is disabled + verifyConnection := data.Get("verify_connection").(bool) + if verifyConnection { + // Verify the string + db, err := sql.Open("mssql", connString) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error validating connection info: %s", err)), nil + } + defer db.Close() + if err := db.Ping(); err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error validating connection info: %s", err)), nil + } + } + + // Store it + entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{ + ConnectionString: connString, + MaxOpenConnections: maxOpenConns, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + // Reset the DB connection + b.ResetDB(ctx) + + resp := &logical.Response{} + resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection string as it is, including passwords, if any.") + + return resp, nil +} + +type connectionConfig struct { + 
ConnectionString string `json:"connection_string" structs:"connection_string" mapstructure:"connection_string"` + MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"` +} + +const pathConfigConnectionHelpSyn = ` +Configure the connection string to talk to Microsoft Sql Server. +` + +const pathConfigConnectionHelpDesc = ` +This path configures the connection string used to connect to Sql Server. +The value of the string is a Data Source Name (DSN). An example is +using "server=;port=;user id=;password=;database=;app name=vault;" + +When configuring the connection string, the backend will verify its validity. +If the database is not available when setting the connection string, set the +"verify_connection" option to false. +` diff --git a/builtin/logical/mssql/path_config_lease.go b/builtin/logical/mssql/path_config_lease.go new file mode 100644 index 0000000000000..d0fe86dfbd40d --- /dev/null +++ b/builtin/logical/mssql/path_config_lease.go @@ -0,0 +1,114 @@ +package mssql + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigLease(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/lease", + Fields: map[string]*framework.FieldSchema{ + "ttl": { + Type: framework.TypeString, + Description: "Default ttl for roles.", + }, + + "ttl_max": { + Type: framework.TypeString, + Description: `Deprecated: use "max_ttl" instead. 
Maximum +time a credential is valid for.`, + }, + + "max_ttl": { + Type: framework.TypeString, + Description: "Maximum time a credential is valid for.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathConfigLeaseRead, + logical.UpdateOperation: b.pathConfigLeaseWrite, + }, + + HelpSynopsis: pathConfigLeaseHelpSyn, + HelpDescription: pathConfigLeaseHelpDesc, + } +} + +func (b *backend) pathConfigLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ttlRaw := d.Get("ttl").(string) + ttlMaxRaw := d.Get("max_ttl").(string) + if len(ttlMaxRaw) == 0 { + ttlMaxRaw = d.Get("ttl_max").(string) + } + + ttl, err := time.ParseDuration(ttlRaw) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Invalid ttl: %s", err)), nil + } + ttlMax, err := time.ParseDuration(ttlMaxRaw) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Invalid max_ttl: %s", err)), nil + } + + // Store it + entry, err := logical.StorageEntryJSON("config/lease", &configLease{ + TTL: ttl, + TTLMax: ttlMax, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathConfigLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + leaseConfig, err := b.LeaseConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + if leaseConfig == nil { + return nil, nil + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "ttl": leaseConfig.TTL.String(), + "ttl_max": leaseConfig.TTLMax.String(), + "max_ttl": leaseConfig.TTLMax.String(), + }, + } + resp.AddWarning("The field ttl_max is deprecated and will be removed in a future release. 
Use max_ttl instead.") + + return resp, nil +} + +type configLease struct { + TTL time.Duration + TTLMax time.Duration +} + +const pathConfigLeaseHelpSyn = ` +Configure the default lease ttl for generated credentials. +` + +const pathConfigLeaseHelpDesc = ` +This configures the default lease ttl used for credentials +generated by this backend. The ttl specifies the duration that a +credential will be valid for, as well as the maximum session for +a set of credentials. + +The format for the ttl is "1h" or integer and then unit. The longest +unit is hour. +` diff --git a/builtin/logical/mssql/path_creds_create.go b/builtin/logical/mssql/path_creds_create.go new file mode 100644 index 0000000000000..42e8642c031aa --- /dev/null +++ b/builtin/logical/mssql/path_creds_create.go @@ -0,0 +1,129 @@ +package mssql + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathCredsCreate(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathCredsCreateRead, + }, + + HelpSynopsis: pathCredsCreateHelpSyn, + HelpDescription: pathCredsCreateHelpDesc, + } +} + +func (b *backend) pathCredsCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Get the role + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil + } + + // Determine if we have a lease configuration + 
leaseConfig, err := b.LeaseConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + if leaseConfig == nil { + leaseConfig = &configLease{} + } + + // Generate our username and password + displayName := req.DisplayName + if len(displayName) > 10 { + displayName = displayName[:10] + } + userUUID, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + username := fmt.Sprintf("%s-%s", displayName, userUUID) + password, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + // Get our handle + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return nil, err + } + defer tx.Rollback() + + // Always reset database to default db of connection. Since it is in a + // transaction, all statements will be on the same connection in the pool. + roleSQL := fmt.Sprintf("USE [%s]; %s", b.defaultDb, role.SQL) + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(roleSQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "password": password, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return nil, err + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return nil, err + } + + // Return the secret + resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ + "username": username, + "password": password, + }, map[string]interface{}{ + "username": username, + }) + resp.Secret.TTL = leaseConfig.TTL + resp.Secret.MaxTTL = leaseConfig.TTLMax + + return resp, nil +} + +const pathCredsCreateHelpSyn = ` +Request database credentials for a certain role. +` + +const pathCredsCreateHelpDesc = ` +This path reads database credentials for a certain role. The +database credentials will be generated on demand and will be automatically +revoked when the lease is up. 
+` diff --git a/builtin/logical/mssql/path_roles.go b/builtin/logical/mssql/path_roles.go new file mode 100644 index 0000000000000..e378422d3cf79 --- /dev/null +++ b/builtin/logical/mssql/path_roles.go @@ -0,0 +1,172 @@ +package mssql + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + + "sql": { + Type: framework.TypeString, + Description: "SQL string to create a role. 
See help for more info.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleRead, + logical.UpdateOperation: b.pathRoleCreate, + logical.DeleteOperation: b.pathRoleDelete, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func (b *backend) Role(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { + entry, err := s.Get(ctx, "role/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "sql": role.SQL, + }, + }, nil +} + +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + sql := data.Get("sql").(string) + + // Get our connection + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Test the query by trying to prepare it + for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") { + 
query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := db.Prepare(Query(query, map[string]string{ + "name": "foo", + "password": "bar", + })) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error testing query: %s", err)), nil + } + stmt.Close() + } + + // Store it + entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{ + SQL: sql, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + return nil, nil +} + +type roleEntry struct { + SQL string `json:"sql"` +} + +const pathRoleHelpSyn = ` +Manage the roles that can be created with this backend. +` + +const pathRoleHelpDesc = ` +This path lets you manage the roles that can be created with this backend. + +The "sql" parameter customizes the SQL string used to create the login to +the server. The parameter can be a sequence of SQL queries, each semi-colon +separated. Some substitution will be done to the SQL string for certain keys. +The names of the variables must be surrounded by "{{" and "}}" to be replaced. + + * "name" - The random username generated for the DB user. + + * "password" - The random password generated for the DB user. + +Example SQL query to use: + + CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; + CREATE USER [{{name}}] FROM LOGIN [{{name}}]; + GRANT SELECT, UPDATE, DELETE, INSERT on SCHEMA::dbo TO [{{name}}]; + +Please see the Microsoft SQL Server manual on the GRANT command to learn how to +do more fine grained access. 
+` diff --git a/builtin/logical/mssql/secret_creds.go b/builtin/logical/mssql/secret_creds.go new file mode 100644 index 0000000000000..9fc52f9a777ac --- /dev/null +++ b/builtin/logical/mssql/secret_creds.go @@ -0,0 +1,180 @@ +package mssql + +import ( + "context" + "database/sql" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/logical" +) + +const SecretCredsType = "creds" + +func secretCreds(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretCredsType, + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "Username", + }, + + "password": { + Type: framework.TypeString, + Description: "Password", + }, + }, + + Renew: b.secretCredsRenew, + Revoke: b.secretCredsRevoke, + } +} + +func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the lease information + leaseConfig, err := b.LeaseConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + if leaseConfig == nil { + leaseConfig = &configLease{} + } + + resp := &logical.Response{Secret: req.Secret} + resp.Secret.TTL = leaseConfig.TTL + resp.Secret.MaxTTL = leaseConfig.TTLMax + return resp, nil +} + +func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the username from the internal data + usernameRaw, ok := req.Secret.InternalData["username"] + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + username, ok := usernameRaw.(string) + + // Get our connection + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + // First disable server login + disableStmt, err := db.Prepare(fmt.Sprintf("ALTER LOGIN [%s] DISABLE;", username)) + if err != nil { + return nil, err + } + defer disableStmt.Close() + if _, err := 
disableStmt.Exec(); err != nil { + return nil, err + } + + // Query for sessions for the login so that we can kill any outstanding + // sessions. There cannot be any active sessions before we drop the logins + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + sessionStmt, err := db.Prepare("SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = @p1;") + if err != nil { + return nil, err + } + defer sessionStmt.Close() + + sessionRows, err := sessionStmt.Query(username) + if err != nil { + return nil, err + } + defer sessionRows.Close() + + var revokeStmts []string + for sessionRows.Next() { + var sessionID int + err = sessionRows.Scan(&sessionID) + if err != nil { + return nil, err + } + revokeStmts = append(revokeStmts, fmt.Sprintf("KILL %d;", sessionID)) + } + + // Query for database users using undocumented stored procedure for now since + // it is the easiest way to get this information; + // we need to drop the database users before we can drop the login and the role + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + stmt, err := db.Prepare("EXEC master.dbo.sp_msloginmappings @p1;") + if err != nil { + return nil, err + } + defer stmt.Close() + + rows, err := stmt.Query(username) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var loginName, dbName, qUsername, aliasName sql.NullString + err = rows.Scan(&loginName, &dbName, &qUsername, &aliasName) + if err != nil { + return nil, err + } + if !dbName.Valid { + continue + } + revokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName.String, username, username)) + } + + // we do not stop on error, as we want to remove as + // many permissions as possible right now + var lastStmtError error + for _, query := range revokeStmts { + if err := dbtxn.ExecuteDBQueryDirect(ctx, db, nil, query); err != nil { + 
lastStmtError = err + continue + } + } + + // can't drop if not all database users are dropped + if rows.Err() != nil { + return nil, fmt.Errorf("could not generate sql statements for all rows: %w", rows.Err()) + } + if lastStmtError != nil { + return nil, fmt.Errorf("could not perform all sql statements: %w", lastStmtError) + } + + // Drop this login + stmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username)) + if err != nil { + return nil, err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return nil, err + } + + return nil, nil +} + +const dropUserSQL = ` +USE [%s] +IF EXISTS + (SELECT name + FROM sys.database_principals + WHERE name = N'%s') +BEGIN + DROP USER [%s] +END +` + +const dropLoginSQL = ` +IF EXISTS + (SELECT name + FROM master.sys.server_principals + WHERE name = N'%s') +BEGIN + DROP LOGIN [%s] +END +` diff --git a/builtin/logical/mssql/util.go b/builtin/logical/mssql/util.go new file mode 100644 index 0000000000000..17c46c6813bb8 --- /dev/null +++ b/builtin/logical/mssql/util.go @@ -0,0 +1,28 @@ +package mssql + +import ( + "fmt" + "strings" +) + +// SplitSQL is used to split a series of SQL statements +func SplitSQL(sql string) []string { + parts := strings.Split(sql, ";") + out := make([]string, 0, len(parts)) + for _, p := range parts { + clean := strings.TrimSpace(p) + if len(clean) > 0 { + out = append(out, clean) + } + } + return out +} + +// Query templates a query for us. 
+func Query(tpl string, data map[string]string) string { + for k, v := range data { + tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + } + + return tpl +} diff --git a/builtin/logical/mysql/backend.go b/builtin/logical/mysql/backend.go new file mode 100644 index 0000000000000..b9840dad3000d --- /dev/null +++ b/builtin/logical/mysql/backend.go @@ -0,0 +1,151 @@ +package mysql + +import ( + "context" + "database/sql" + "fmt" + "strings" + "sync" + + _ "github.com/go-sql-driver/mysql" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + + Paths: []*framework.Path{ + pathConfigConnection(&b), + pathConfigLease(&b), + pathListRoles(&b), + pathRoles(&b), + pathRoleCreate(&b), + }, + + Secrets: []*framework.Secret{ + secretCreds(&b), + }, + + Invalidate: b.invalidate, + Clean: b.ResetDB, + BackendType: logical.TypeLogical, + } + + return &b +} + +type backend struct { + *framework.Backend + + db *sql.DB + lock sync.Mutex +} + +// DB returns the database connection. +func (b *backend) DB(ctx context.Context, s logical.Storage) (*sql.DB, error) { + b.lock.Lock() + defer b.lock.Unlock() + + // If we already have a DB, we got it! 
+ if b.db != nil { + if err := b.db.Ping(); err == nil { + return b.db, nil + } + // If the ping was unsuccessful, close it and ignore errors as we'll be + // reestablishing anyways + b.db.Close() + } + + // Otherwise, attempt to make connection + entry, err := s.Get(ctx, "config/connection") + if err != nil { + return nil, err + } + if entry == nil { + return nil, + fmt.Errorf("configure the DB connection with config/connection first") + } + + var connConfig connectionConfig + if err := entry.DecodeJSON(&connConfig); err != nil { + return nil, err + } + + conn := connConfig.ConnectionURL + if len(conn) == 0 { + conn = connConfig.ConnectionString + } + + b.db, err = sql.Open("mysql", conn) + if err != nil { + return nil, err + } + + // Set some connection pool settings. We don't need much of this, + // since the request rate shouldn't be high. + b.db.SetMaxOpenConns(connConfig.MaxOpenConnections) + b.db.SetMaxIdleConns(connConfig.MaxIdleConnections) + + return b.db, nil +} + +// ResetDB forces a connection next time DB() is called. +func (b *backend) ResetDB(_ context.Context) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.db != nil { + b.db.Close() + } + + b.db = nil +} + +func (b *backend) invalidate(ctx context.Context, key string) { + switch key { + case "config/connection": + b.ResetDB(ctx) + } +} + +// Lease returns the lease information +func (b *backend) Lease(ctx context.Context, s logical.Storage) (*configLease, error) { + entry, err := s.Get(ctx, "config/lease") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +const backendHelp = ` +The MySQL backend dynamically generates database users. + +After mounting this backend, configure it using the endpoints within +the "config/" path. 
+` diff --git a/builtin/logical/mysql/backend_test.go b/builtin/logical/mysql/backend_test.go new file mode 100644 index 0000000000000..62074d8ed4e5b --- /dev/null +++ b/builtin/logical/mysql/backend_test.go @@ -0,0 +1,307 @@ +package mysql + +import ( + "context" + "fmt" + "log" + "reflect" + "testing" + + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + mysqlhelper "github.com/hashicorp/vault/helper/testhelpers/mysql" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +func TestBackend_config_connection(t *testing.T) { + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "connection_url": "sample_connection_url", + "max_open_connections": 9, + "max_idle_connections": 7, + "verify_connection": false, + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + configReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(configData, "verify_connection") + delete(configData, "connection_url") + if !reflect.DeepEqual(configData, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data) + } +} + +func TestBackend_basic(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, 
false, "secret") + defer cleanup() + + connData := map[string]interface{}{ + "connection_url": connURL, + } + + // for wildcard based mysql user + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData, false), + testAccStepRole(t, true), + testAccStepReadCreds(t, "web"), + }, + }) +} + +func TestBackend_basicHostRevoke(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") + defer cleanup() + + connData := map[string]interface{}{ + "connection_url": connURL, + } + + // for host based mysql user + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData, false), + testAccStepRole(t, false), + testAccStepReadCreds(t, "web"), + }, + }) +} + +func TestBackend_roleCrud(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") + defer cleanup() + + connData := map[string]interface{}{ + "connection_url": connURL, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData, false), + // test SQL with wildcard based user + testAccStepRole(t, true), + testAccStepReadRole(t, "web", testRoleWildCard), + testAccStepDeleteRole(t, "web"), + // test SQL with host based user + testAccStepRole(t, false), + testAccStepReadRole(t, "web", testRoleHost), + testAccStepDeleteRole(t, "web"), + }, + }) +} + +func TestBackend_leaseWriteRead(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := 
Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") + defer cleanup() + + connData := map[string]interface{}{ + "connection_url": connURL, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData, false), + testAccStepWriteLease(t), + testAccStepReadLease(t), + }, + }) +} + +func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Data: d, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if expectError { + if resp.Data == nil { + return fmt.Errorf("data is nil") + } + var e struct { + Error string `mapstructure:"error"` + } + if err := mapstructure.Decode(resp.Data, &e); err != nil { + return err + } + if len(e.Error) == 0 { + return fmt.Errorf("expected error, but write succeeded") + } + return nil + } else if resp != nil && resp.IsError() { + return fmt.Errorf("got an error response: %v", resp.Error()) + } + return nil + }, + } +} + +func testAccStepRole(t *testing.T, wildCard bool) logicaltest.TestStep { + pathData := make(map[string]interface{}) + if wildCard { + pathData = map[string]interface{}{ + "sql": testRoleWildCard, + } + } else { + pathData = map[string]interface{}{ + "sql": testRoleHost, + "revocation_sql": testRevocationSQL, + } + } + + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/web", + Data: pathData, + } +} + +func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "roles/" + n, + } +} + +func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "creds/" + name, + Check: func(resp 
*logical.Response) error { + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + log.Printf("[WARN] Generated credentials: %v", d) + + return nil + }, + } +} + +func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if sql == "" { + return nil + } + + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + SQL string `mapstructure:"sql"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if d.SQL != sql { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +func testAccStepWriteLease(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/lease", + Data: map[string]interface{}{ + "lease": "1h5m", + "lease_max": "24h", + }, + } +} + +func testAccStepReadLease(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "config/lease", + Check: func(resp *logical.Response) error { + if resp.Data["lease"] != "1h5m0s" || resp.Data["lease_max"] != "24h0m0s" { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +const testRoleWildCard = ` +CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; +GRANT SELECT ON *.* TO '{{name}}'@'%'; +` + +const testRoleHost = ` +CREATE USER '{{name}}'@'10.1.1.2' IDENTIFIED BY '{{password}}'; +GRANT SELECT ON *.* TO '{{name}}'@'10.1.1.2'; +` + +const testRevocationSQL = ` +REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'10.1.1.2'; +DROP USER '{{name}}'@'10.1.1.2'; +` diff --git a/builtin/logical/mysql/cmd/mysql/main.go b/builtin/logical/mysql/cmd/mysql/main.go new file mode 100644 index 0000000000000..e1fbe4a01e492 --- 
/dev/null +++ b/builtin/logical/mysql/cmd/mysql/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/mysql" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.Serve(&plugin.ServeOpts{ + BackendFactoryFunc: mysql.Factory, + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/mysql/path_config_connection.go b/builtin/logical/mysql/path_config_connection.go new file mode 100644 index 0000000000000..151fd7c8787c3 --- /dev/null +++ b/builtin/logical/mysql/path_config_connection.go @@ -0,0 +1,159 @@ +package mysql + +import ( + "context" + "database/sql" + "fmt" + + _ "github.com/go-sql-driver/mysql" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigConnection(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/connection", + Fields: map[string]*framework.FieldSchema{ + "connection_url": { + Type: framework.TypeString, + Description: "DB connection string", + }, + "value": { + Type: framework.TypeString, + Description: `DB connection string. Use 'connection_url' instead. +This name is deprecated.`, + }, + "max_open_connections": { + Type: framework.TypeInt, + Description: "Maximum number of open connections to database", + }, + "max_idle_connections": { + Type: framework.TypeInt, + Description: "Maximum number of idle connections to the database; a zero uses the value of max_open_connections and a negative value disables idle connections. 
If larger than max_open_connections it will be reduced to the same size.", + }, + "verify_connection": { + Type: framework.TypeBool, + Default: true, + Description: "If set, connection_url is verified by actually connecting to the database", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConnectionWrite, + logical.ReadOperation: b.pathConnectionRead, + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +// pathConnectionRead reads out the connection configuration +func (b *backend) pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entry, err := req.Storage.Get(ctx, "config/connection") + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration") + } + if entry == nil { + return nil, nil + } + + var config connectionConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "max_open_connections": config.MaxOpenConnections, + "max_idle_connections": config.MaxIdleConnections, + }, + }, nil +} + +func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + connValue := data.Get("value").(string) + connURL := data.Get("connection_url").(string) + if connURL == "" { + if connValue == "" { + return logical.ErrorResponse("the connection_url parameter must be supplied"), nil + } else { + connURL = connValue + } + } + + maxOpenConns := data.Get("max_open_connections").(int) + if maxOpenConns == 0 { + maxOpenConns = 2 + } + + maxIdleConns := data.Get("max_idle_connections").(int) + if maxIdleConns == 0 { + maxIdleConns = maxOpenConns + } + if maxIdleConns > maxOpenConns { + maxIdleConns = maxOpenConns + } + + // Don't check the connection_url if verification is disabled + verifyConnection := 
data.Get("verify_connection").(bool) + if verifyConnection { + // Verify the string + db, err := sql.Open("mysql", connURL) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "error validating connection info: %s", err)), nil + } + defer db.Close() + if err := db.Ping(); err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "error validating connection info: %s", err)), nil + } + } + + // Store it + entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{ + ConnectionURL: connURL, + MaxOpenConnections: maxOpenConns, + MaxIdleConnections: maxIdleConns, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + // Reset the DB connection + b.ResetDB(ctx) + + resp := &logical.Response{} + resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection URL as it is, including passwords, if any.") + + return resp, nil +} + +type connectionConfig struct { + ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"` + // Deprecate "value" in coming releases + ConnectionString string `json:"value" structs:"value" mapstructure:"value"` + MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"` + MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"` +} + +const pathConfigConnectionHelpSyn = ` +Configure the connection string to talk to MySQL. +` + +const pathConfigConnectionHelpDesc = ` +This path configures the connection string used to connect to MySQL. The value +of the string is a Data Source Name (DSN). An example is using +"username:password@protocol(address)/dbname?param=value" + +For example, RDS may look like: +"id:password@tcp(your-amazonaws-uri.com:3306)/dbname" + +When configuring the connection string, the backend will verify its validity. 
+If the database is not available when setting the connection URL, set the +"verify_connection" option to false. +` diff --git a/builtin/logical/mysql/path_config_lease.go b/builtin/logical/mysql/path_config_lease.go new file mode 100644 index 0000000000000..e8b0543e0101a --- /dev/null +++ b/builtin/logical/mysql/path_config_lease.go @@ -0,0 +1,101 @@ +package mysql + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigLease(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/lease", + Fields: map[string]*framework.FieldSchema{ + "lease": { + Type: framework.TypeString, + Description: "Default lease for roles.", + }, + + "lease_max": { + Type: framework.TypeString, + Description: "Maximum time a credential is valid for.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathLeaseRead, + logical.UpdateOperation: b.pathLeaseWrite, + }, + + HelpSynopsis: pathConfigLeaseHelpSyn, + HelpDescription: pathConfigLeaseHelpDesc, + } +} + +func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + leaseRaw := d.Get("lease").(string) + leaseMaxRaw := d.Get("lease_max").(string) + + lease, err := time.ParseDuration(leaseRaw) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Invalid lease: %s", err)), nil + } + leaseMax, err := time.ParseDuration(leaseMaxRaw) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Invalid lease: %s", err)), nil + } + + // Store it + entry, err := logical.StorageEntryJSON("config/lease", &configLease{ + Lease: lease, + LeaseMax: leaseMax, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) 
(*logical.Response, error) { + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "lease": lease.Lease.String(), + "lease_max": lease.LeaseMax.String(), + }, + }, nil +} + +type configLease struct { + Lease time.Duration + LeaseMax time.Duration +} + +const pathConfigLeaseHelpSyn = ` +Configure the default lease information for generated credentials. +` + +const pathConfigLeaseHelpDesc = ` +This configures the default lease information used for credentials +generated by this backend. The lease specifies the duration that a +credential will be valid for, as well as the maximum session for +a set of credentials. + +The format for the lease is "1h" or integer and then unit. The longest +unit is hour. +` diff --git a/builtin/logical/mysql/path_role_create.go b/builtin/logical/mysql/path_role_create.go new file mode 100644 index 0000000000000..6bc0c4b3b3214 --- /dev/null +++ b/builtin/logical/mysql/path_role_create.go @@ -0,0 +1,143 @@ +package mysql + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathRoleCreate(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleCreateRead, + }, + + HelpSynopsis: pathRoleCreateReadHelpSyn, + HelpDescription: pathRoleCreateReadHelpDesc, + } +} + +func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := 
data.Get("name").(string) + + // Get the role + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil + } + + // Determine if we have a lease + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + lease = &configLease{} + } + + // Generate our username and password. The username will be a + // concatenation of: + // + // - the role name, truncated to role.rolenameLength (default 4) + // - the token display name, truncated to role.displaynameLength (default 4) + // - a UUID + // + // the entire concatenated string is then truncated to role.usernameLength, + // which by default is 16 due to limitations in older but still-prevalent + // versions of MySQL. + roleName := name + if len(roleName) > role.RolenameLength { + roleName = roleName[:role.RolenameLength] + } + displayName := req.DisplayName + if len(displayName) > role.DisplaynameLength { + displayName = displayName[:role.DisplaynameLength] + } + userUUID, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + username := fmt.Sprintf("%s-%s-%s", roleName, displayName, userUUID) + if len(username) > role.UsernameLength { + username = username[:role.UsernameLength] + } + password, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + // Get our handle + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return nil, err + } + defer tx.Rollback() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(role.SQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "password": password, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return nil, err + } + } + + // Commit the transaction + if 
err := tx.Commit(); err != nil { + return nil, err + } + + // Return the secret + resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ + "username": username, + "password": password, + }, map[string]interface{}{ + "username": username, + "role": name, + }) + + resp.Secret.TTL = lease.Lease + resp.Secret.MaxTTL = lease.LeaseMax + + return resp, nil +} + +const pathRoleCreateReadHelpSyn = ` +Request database credentials for a certain role. +` + +const pathRoleCreateReadHelpDesc = ` +This path reads database credentials for a certain role. The +database credentials will be generated on demand and will be automatically +revoked when the lease is up. +` diff --git a/builtin/logical/mysql/path_roles.go b/builtin/logical/mysql/path_roles.go new file mode 100644 index 0000000000000..eecf48732fe21 --- /dev/null +++ b/builtin/logical/mysql/path_roles.go @@ -0,0 +1,230 @@ +package mysql + +import ( + "context" + "fmt" + "strings" + + _ "github.com/go-sql-driver/mysql" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + + "sql": { + Type: framework.TypeString, + Description: "SQL string to create a user. See help for more info.", + }, + + "revocation_sql": { + Type: framework.TypeString, + Description: "SQL string to revoke a user. 
See help for more info.", + }, + + "username_length": { + Type: framework.TypeInt, + Description: "number of characters to truncate generated mysql usernames to (default 16)", + Default: 16, + }, + + "rolename_length": { + Type: framework.TypeInt, + Description: "number of characters to truncate the rolename portion of generated mysql usernames to (default 4)", + Default: 4, + }, + + "displayname_length": { + Type: framework.TypeInt, + Description: "number of characters to truncate the displayname portion of generated mysql usernames to (default 4)", + Default: 4, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleRead, + logical.UpdateOperation: b.pathRoleCreate, + logical.DeleteOperation: b.pathRoleDelete, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func (b *backend) Role(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { + entry, err := s.Get(ctx, "role/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + // Set defaults to handle upgrade cases + result := roleEntry{ + UsernameLength: 16, + RolenameLength: 4, + DisplaynameLength: 4, + } + + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "sql": role.SQL, + "revocation_sql": role.RevocationSQL, + }, + }, nil 
+} + +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Get our connection + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Test the query by trying to prepare it + sql := data.Get("sql").(string) + for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := db.Prepare(Query(query, map[string]string{ + "name": "foo", + "password": "bar", + })) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error testing query: %s", err)), nil + } + stmt.Close() + } + + // Store it + entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{ + SQL: sql, + RevocationSQL: data.Get("revocation_sql").(string), + UsernameLength: data.Get("username_length").(int), + DisplaynameLength: data.Get("displayname_length").(int), + RolenameLength: data.Get("rolename_length").(int), + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + return nil, nil +} + +type roleEntry struct { + SQL string `json:"sql" mapstructure:"sql" structs:"sql"` + RevocationSQL string `json:"revocation_sql" mapstructure:"revocation_sql" structs:"revocation_sql"` + UsernameLength int `json:"username_length" mapstructure:"username_length" structs:"username_length"` + DisplaynameLength int `json:"displayname_length" mapstructure:"displayname_length" structs:"displayname_length"` + RolenameLength int `json:"rolename_length" mapstructure:"rolename_length" structs:"rolename_length"` +} + +const pathRoleHelpSyn = ` 
+Manage the roles that can be created with this backend. +` + +const pathRoleHelpDesc = ` +This path lets you manage the roles that can be created with this backend. + +The "sql" parameter customizes the SQL string used to create the role. +This can be a sequence of SQL queries, each semi-colon separated. Some +substitution will be done to the SQL string for certain keys. +The names of the variables must be surrounded by "{{" and "}}" to be replaced. + + * "name" - The random username generated for the DB user. + + * "password" - The random password generated for the DB user. + +Example of a decent SQL query to use: + + CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; + GRANT ALL ON db1.* TO '{{name}}'@'%'; + +Note the above user would be able to access anything in db1. Please see the MySQL +manual on the GRANT command to learn how to do more fine grained access. + +The "rolename_length" parameter determines how many characters of the role name +will be used in creating the generated mysql username; the default is 4. + +The "displayname_length" parameter determines how many characters of the token +display name will be used in creating the generated mysql username; the default +is 4. + +The "username_length" parameter determines how many total characters the +generated username (including the role name, token display name and the uuid +portion) will be truncated to. Versions of MySQL prior to 5.7.8 are limited to +16 characters total (see +http://dev.mysql.com/doc/refman/5.7/en/user-names.html) so that is the default; +for versions >=5.7.8 it is safe to increase this to 32. + +For best readability in MySQL process lists, we recommend using MySQL 5.7.8 or +later, setting "username_length" to 32 and setting both "rolename_length" and +"displayname_length" to 8. However due the the prevalence of older versions of +MySQL in general deployment, the defaults are currently tuned for a +username_length of 16. 
+` diff --git a/builtin/logical/mysql/secret_creds.go b/builtin/logical/mysql/secret_creds.go new file mode 100644 index 0000000000000..454edbaa927e1 --- /dev/null +++ b/builtin/logical/mysql/secret_creds.go @@ -0,0 +1,136 @@ +package mysql + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const SecretCredsType = "creds" + +// defaultRevocationSQL is a default SQL statement for revoking a user. Revoking +// permissions for the user is done before the drop, because MySQL explicitly +// documents that open user connections will not be closed. By revoking all +// grants, at least we ensure that the open connection is useless. Dropping the +// user will only affect the next connection. +const defaultRevocationSQL = ` +REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; +DROP USER '{{name}}'@'%' +` + +func secretCreds(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretCredsType, + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "Username", + }, + + "password": { + Type: framework.TypeString, + Description: "Password", + }, + }, + + Renew: b.secretCredsRenew, + Revoke: b.secretCredsRevoke, + } +} + +func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the lease information + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + lease = &configLease{} + } + + resp := &logical.Response{Secret: req.Secret} + resp.Secret.TTL = lease.Lease + resp.Secret.MaxTTL = lease.LeaseMax + return resp, nil +} + +func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var resp *logical.Response + + // Get the username from the internal data + usernameRaw, ok 
:= req.Secret.InternalData["username"] + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("usernameRaw is not a string") + } + + // Get our connection + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + roleName := "" + roleNameRaw, ok := req.Secret.InternalData["role"] + if ok { + roleName = roleNameRaw.(string) + } + + var role *roleEntry + if roleName != "" { + role, err = b.Role(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + } + + // Use a default SQL statement for revocation if one cannot be fetched from the role + revocationSQL := defaultRevocationSQL + + if role != nil && role.RevocationSQL != "" { + revocationSQL = role.RevocationSQL + } else { + if resp == nil { + resp = &logical.Response{} + } + resp.AddWarning(fmt.Sprintf("Role %q cannot be found. Using default SQL for revoking user.", roleName)) + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return nil, err + } + defer tx.Rollback() + + for _, query := range strutil.ParseArbitraryStringSlice(revocationSQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + // This is not a prepared statement because not all commands are supported + // 1295: This command is not supported in the prepared statement protocol yet + // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/ + query = strings.ReplaceAll(query, "{{name}}", username) + _, err = tx.Exec(query) + if err != nil { + return nil, err + } + + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return nil, err + } + + return resp, nil +} diff --git a/builtin/logical/mysql/util.go b/builtin/logical/mysql/util.go new file mode 100644 index 0000000000000..4ba7c650c208d --- /dev/null +++ b/builtin/logical/mysql/util.go @@ -0,0 +1,15 @@ +package mysql + +import ( + "fmt" + "strings" +) + +// Query templates a query 
for us. +func Query(tpl string, data map[string]string) string { + for k, v := range data { + tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + } + + return tpl +} diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 0becdae776b12..e1df32e87a831 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package nomad import ( @@ -11,8 +8,6 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const operationPrefixNomad = "nomad" - // Factory returns a Nomad backend that satisfies the logical.Backend interface func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 1fe6adbb2d38e..8452c2b019e41 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package nomad import ( @@ -8,14 +5,13 @@ import ( "fmt" "os" "reflect" - "runtime" "strings" "testing" "time" nomadapi "github.com/hashicorp/nomad/api" "github.com/hashicorp/vault/helper/testhelpers" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) @@ -39,11 +35,6 @@ func (c *Config) Client() (*nomadapi.Client, error) { } func prepareTestContainer(t *testing.T, bootstrap bool) (func(), *Config) { - // Skipping on ARM, as this image can't run on ARM architecture - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as this image is not supported on ARM architectures") - } - if retAddress := os.Getenv("NOMAD_ADDR"); retAddress != "" { s, err := docker.NewServiceURLParse(retAddress) if err != nil { @@ -53,7 +44,7 @@ func prepareTestContainer(t *testing.T, bootstrap bool) (func(), *Config) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "docker.mirror.hashicorp.services/multani/nomad", + ImageRepo: "multani/nomad", ImageTag: "1.1.6", ContainerName: "nomad", Ports: []string{"4646/tcp"}, diff --git a/builtin/logical/nomad/cmd/nomad/main.go b/builtin/logical/nomad/cmd/nomad/main.go index 10f45aabb4837..31b1c93500e7d 100644 --- a/builtin/logical/nomad/cmd/nomad/main.go +++ b/builtin/logical/nomad/cmd/nomad/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: nomad.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index cbb214002438f..b482a9c1aca86 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package nomad import ( @@ -16,11 +13,6 @@ const configAccessKey = "config/access" func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixNomad, - }, - Fields: map[string]*framework.FieldSchema{ "address": { Type: framework.TypeString, @@ -53,35 +45,11 @@ must be x509 PEM encoded and if this is set you need to also set client_cert.`, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigAccessRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "read", - OperationSuffix: "access-configuration", - }, - }, - logical.CreateOperation: &framework.PathOperation{ - Callback: b.pathConfigAccessWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "access", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigAccessWrite, - DisplayAttrs: 
&framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "access", - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathConfigAccessDelete, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "delete", - OperationSuffix: "access-configuration", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathConfigAccessRead, + logical.CreateOperation: b.pathConfigAccessWrite, + logical.UpdateOperation: b.pathConfigAccessWrite, + logical.DeleteOperation: b.pathConfigAccessDelete, }, ExistenceCheck: b.configExistenceCheck, diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go index 05c83ff938f8a..676e515cb84b5 100644 --- a/builtin/logical/nomad/path_config_lease.go +++ b/builtin/logical/nomad/path_config_lease.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package nomad import ( @@ -16,11 +13,6 @@ const leaseConfigKey = "config/lease" func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixNomad, - }, - Fields: map[string]*framework.FieldSchema{ "ttl": { Type: framework.TypeDurationSecond, @@ -32,28 +24,10 @@ func pathConfigLease(b *backend) *framework.Path { }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathLeaseRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "read", - OperationSuffix: "lease-configuration", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathLeaseUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "lease", - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathLeaseDelete, - DisplayAttrs: &framework.DisplayAttributes{ - 
OperationVerb: "delete", - OperationSuffix: "lease-configuration", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathLeaseRead, + logical.UpdateOperation: b.pathLeaseUpdate, + logical.DeleteOperation: b.pathLeaseDelete, }, HelpSynopsis: pathConfigLeaseHelpSyn, diff --git a/builtin/logical/nomad/path_creds_create.go b/builtin/logical/nomad/path_creds_create.go index 29f84d136908d..14df1ff939db5 100644 --- a/builtin/logical/nomad/path_creds_create.go +++ b/builtin/logical/nomad/path_creds_create.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package nomad import ( @@ -20,13 +17,6 @@ const maxTokenNameLength = 256 func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixNomad, - OperationVerb: "generate", - OperationSuffix: "credentials", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index e3cebefb52329..92109ba741230 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package nomad import ( @@ -16,11 +13,6 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "role/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixNomad, - OperationSuffix: "roles", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -30,12 +22,6 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixNomad, - OperationSuffix: "role", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 3c6b920681d37..fd446f7a6436f 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package nomad import ( diff --git a/builtin/logical/pki/acme_authorizations.go b/builtin/logical/pki/acme_authorizations.go deleted file mode 100644 index 82d439d88a540..0000000000000 --- a/builtin/logical/pki/acme_authorizations.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "fmt" - "time" -) - -type ACMEIdentifierType string - -const ( - ACMEDNSIdentifier ACMEIdentifierType = "dns" - ACMEIPIdentifier ACMEIdentifierType = "ip" -) - -type ACMEIdentifier struct { - Type ACMEIdentifierType `json:"type"` - Value string `json:"value"` - OriginalValue string `json:"original_value"` - IsWildcard bool `json:"is_wildcard"` -} - -func (ai *ACMEIdentifier) MaybeParseWildcard() (bool, string, error) { - if ai.Type != ACMEDNSIdentifier || !isWildcardDomain(ai.Value) { - return false, ai.Value, nil - } - - // Here on out, technically it is a wildcard. - ai.IsWildcard = true - - wildcardLabel, reducedName, err := validateWildcardDomain(ai.Value) - if err != nil { - return true, "", err - } - - if wildcardLabel != "*" { - // Per RFC 8555 Section. 7.1.3. Order Objects: - // - // > Any identifier of type "dns" in a newOrder request MAY have a - // > wildcard domain name as its value. A wildcard domain name consists - // > of a single asterisk character followed by a single full stop - // > character ("*.") followed by a domain name as defined for use in the - // > Subject Alternate Name Extension by [RFC5280]. - return true, "", fmt.Errorf("wildcard must be entire left-most label") - } - - if reducedName == "" { - return true, "", fmt.Errorf("wildcard must not be entire domain name; need at least two domain labels") - } - - // Parsing was indeed successful, so update our reduced name. 
- ai.Value = reducedName - - return true, reducedName, nil -} - -func (ai *ACMEIdentifier) NetworkMarshal(useOriginalValue bool) map[string]interface{} { - value := ai.OriginalValue - if !useOriginalValue { - value = ai.Value - } - return map[string]interface{}{ - "type": ai.Type, - "value": value, - } -} - -type ACMEAuthorizationStatusType string - -const ( - ACMEAuthorizationPending ACMEAuthorizationStatusType = "pending" - ACMEAuthorizationValid ACMEAuthorizationStatusType = "valid" - ACMEAuthorizationInvalid ACMEAuthorizationStatusType = "invalid" - ACMEAuthorizationDeactivated ACMEAuthorizationStatusType = "deactivated" - ACMEAuthorizationExpired ACMEAuthorizationStatusType = "expired" - ACMEAuthorizationRevoked ACMEAuthorizationStatusType = "revoked" -) - -type ACMEOrderStatusType string - -const ( - ACMEOrderPending ACMEOrderStatusType = "pending" - ACMEOrderProcessing ACMEOrderStatusType = "processing" - ACMEOrderValid ACMEOrderStatusType = "valid" - ACMEOrderInvalid ACMEOrderStatusType = "invalid" - ACMEOrderReady ACMEOrderStatusType = "ready" -) - -type ACMEChallengeType string - -const ( - ACMEHTTPChallenge ACMEChallengeType = "http-01" - ACMEDNSChallenge ACMEChallengeType = "dns-01" - ACMEALPNChallenge ACMEChallengeType = "tls-alpn-01" -) - -type ACMEChallengeStatusType string - -const ( - ACMEChallengePending ACMEChallengeStatusType = "pending" - ACMEChallengeProcessing ACMEChallengeStatusType = "processing" - ACMEChallengeValid ACMEChallengeStatusType = "valid" - ACMEChallengeInvalid ACMEChallengeStatusType = "invalid" -) - -type ACMEChallenge struct { - Type ACMEChallengeType `json:"type"` - Status ACMEChallengeStatusType `json:"status"` - Validated string `json:"validated,optional"` - Error map[string]interface{} `json:"error,optional"` - ChallengeFields map[string]interface{} `json:"challenge_fields"` -} - -func (ac *ACMEChallenge) NetworkMarshal(acmeCtx *acmeContext, authId string) map[string]interface{} { - resp := map[string]interface{}{ - 
"type": ac.Type, - "url": buildChallengeUrl(acmeCtx, authId, string(ac.Type)), - "status": ac.Status, - } - - if ac.Validated != "" { - resp["validated"] = ac.Validated - } - - if len(ac.Error) > 0 { - resp["error"] = ac.Error - } - - for field, value := range ac.ChallengeFields { - resp[field] = value - } - - return resp -} - -func buildChallengeUrl(acmeCtx *acmeContext, authId, challengeType string) string { - return acmeCtx.baseUrl.JoinPath("/challenge/", authId, challengeType).String() -} - -type ACMEAuthorization struct { - Id string `json:"id"` - AccountId string `json:"account_id"` - - Identifier *ACMEIdentifier `json:"identifier"` - Status ACMEAuthorizationStatusType `json:"status"` - - // Per RFC 8555 Section 7.1.4. Authorization Objects: - // - // > This field is REQUIRED for objects with "valid" in the "status" - // > field. - Expires string `json:"expires,optional"` - - Challenges []*ACMEChallenge `json:"challenges"` - Wildcard bool `json:"wildcard"` -} - -func (aa *ACMEAuthorization) GetExpires() (time.Time, error) { - if aa.Expires == "" { - return time.Time{}, nil - } - - return time.Parse(time.RFC3339, aa.Expires) -} - -func (aa *ACMEAuthorization) NetworkMarshal(acmeCtx *acmeContext) map[string]interface{} { - resp := map[string]interface{}{ - "identifier": aa.Identifier.NetworkMarshal( /* use value, not original value */ false), - "status": aa.Status, - "wildcard": aa.Wildcard, - } - - if aa.Expires != "" { - resp["expires"] = aa.Expires - } - - if len(aa.Challenges) > 0 { - challenges := []map[string]interface{}{} - for _, challenge := range aa.Challenges { - challenges = append(challenges, challenge.NetworkMarshal(acmeCtx, aa.Id)) - } - resp["challenges"] = challenges - } - - return resp -} diff --git a/builtin/logical/pki/acme_billing.go b/builtin/logical/pki/acme_billing.go deleted file mode 100644 index 642e0f4fcdd38..0000000000000 --- a/builtin/logical/pki/acme_billing.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "fmt" - - "github.com/hashicorp/vault/sdk/logical" -) - -func (b *backend) doTrackBilling(ctx context.Context, identifiers []*ACMEIdentifier) error { - billingView, ok := b.System().(logical.ACMEBillingSystemView) - if !ok { - return fmt.Errorf("failed to perform cast to ACME billing system view interface") - } - - var realized []string - for _, identifier := range identifiers { - realized = append(realized, fmt.Sprintf("%s/%s", identifier.Type, identifier.OriginalValue)) - } - - return billingView.CreateActivityCountEventForIdentifiers(ctx, realized) -} diff --git a/builtin/logical/pki/acme_billing_test.go b/builtin/logical/pki/acme_billing_test.go deleted file mode 100644 index 3a240f1875d6a..0000000000000 --- a/builtin/logical/pki/acme_billing_test.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/json" - "strings" - "testing" - "time" - - "golang.org/x/crypto/acme" - - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/logical/pki/dnstest" - "github.com/hashicorp/vault/helper/constants" - "github.com/hashicorp/vault/helper/timeutil" - "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/activity" - - "github.com/stretchr/testify/require" -) - -// TestACMEBilling is a basic test that will validate client counts created via ACME workflows. -func TestACMEBilling(t *testing.T) { - t.Parallel() - timeutil.SkipAtEndOfMonth(t) - - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - dns := dnstest.SetupResolver(t, "dadgarcorp.com") - defer dns.Cleanup() - - // Enable additional mounts. 
- setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki2") - setupAcmeBackendOnClusterAtPath(t, cluster, client, "ns1/pki") - setupAcmeBackendOnClusterAtPath(t, cluster, client, "ns2/pki") - - // Enable custom DNS resolver for testing. - for _, mount := range []string{"pki", "pki2", "ns1/pki", "ns2/pki"} { - _, err := client.Logical().Write(mount+"/config/acme", map[string]interface{}{ - "dns_resolver": dns.GetLocalAddr(), - }) - require.NoError(t, err, "failed to set local dns resolver address for testing on mount: "+mount) - } - - // Enable client counting. - _, err := client.Logical().Write("/sys/internal/counters/config", map[string]interface{}{ - "enabled": "enable", - }) - require.NoError(t, err, "failed to enable client counting") - - // Setup ACME clients. We refresh account keys each time for consistency. - acmeClientPKI := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", nil) - acmeClientPKI2 := getAcmeClientForCluster(t, cluster, "/v1/pki2/acme/", nil) - acmeClientPKINS1 := getAcmeClientForCluster(t, cluster, "/v1/ns1/pki/acme/", nil) - acmeClientPKINS2 := getAcmeClientForCluster(t, cluster, "/v1/ns2/pki/acme/", nil) - - // Get our initial count. - expectedCount := validateClientCount(t, client, "", -1, "initial fetch") - - // Unique identifier: should increase by one. - doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate") - - // Different identifier; should increase by one. - doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"example.dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate") - - // While same identifiers, used together and so thus are unique; increase by one. 
- doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"example.dadgarcorp.com", "dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate") - - // Same identifiers in different order are not unique; keep the same. - doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"dadgarcorp.com", "example.dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "pki", expectedCount, "different order; same identifiers") - - // Using a different mount shouldn't affect counts. - doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "", expectedCount, "different mount; same identifiers") - - // But using a different identifier should. - doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"pki2.dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "pki2", expectedCount+1, "different mount with different identifiers") - - // A new identifier in a unique namespace will affect results. - doACMEForDomainWithDNS(t, dns, acmeClientPKINS1, []string{"unique.dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "ns1/pki", expectedCount+1, "unique identifier in a namespace") - - // But in a different namespace with the existing identifier will not. - doACMEForDomainWithDNS(t, dns, acmeClientPKINS2, []string{"unique.dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "", expectedCount, "existing identifier in a namespace") - doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"unique.dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "", expectedCount, "existing identifier outside of a namespace") - - // Creating a unique identifier in a namespace with a mount with the - // same name as another namespace should increase counts as well. 
- doACMEForDomainWithDNS(t, dns, acmeClientPKINS2, []string{"very-unique.dadgarcorp.com"}) - expectedCount = validateClientCount(t, client, "ns2/pki", expectedCount+1, "unique identifier in a different namespace") - - // Check the current fragment - fragment := cluster.Cores[0].Core.ResetActivityLog()[0] - if fragment == nil { - t.Fatal("no fragment created") - } - validateAcmeClientTypes(t, fragment, expectedCount) -} - -func validateAcmeClientTypes(t *testing.T, fragment *activity.LogFragment, expectedCount int64) { - t.Helper() - if int64(len(fragment.Clients)) != expectedCount { - t.Fatalf("bad number of entities, expected %v: got %v, entities are: %v", expectedCount, len(fragment.Clients), fragment.Clients) - } - - for _, ac := range fragment.Clients { - if ac.ClientType != vault.ACMEActivityType { - t.Fatalf("Couldn't find expected '%v' client_type in %v", vault.ACMEActivityType, fragment.Clients) - } - } -} - -func validateClientCount(t *testing.T, client *api.Client, mount string, expected int64, message string) int64 { - resp, err := client.Logical().Read("/sys/internal/counters/activity/monthly") - require.NoError(t, err, "failed to fetch client count values") - t.Logf("got client count numbers: %v", resp) - - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.Contains(t, resp.Data, "non_entity_clients") - require.Contains(t, resp.Data, "months") - - rawCount := resp.Data["non_entity_clients"].(json.Number) - count, err := rawCount.Int64() - require.NoError(t, err, "failed to parse number as int64: "+rawCount.String()) - - if expected != -1 { - require.Equal(t, expected, count, "value of client counts did not match expectations: "+message) - } - - if mount == "" { - return count - } - - months := resp.Data["months"].([]interface{}) - if len(months) > 1 { - t.Fatalf("running across a month boundary despite using SkipAtEndOfMonth(...); rerun test from start fully in the next month instead") - } - - require.Equal(t, 1, len(months), "expected 
only a single month when running this test") - - monthlyInfo := months[0].(map[string]interface{}) - - // Validate this month's aggregate counts match the overall value. - require.Contains(t, monthlyInfo, "counts", "expected monthly info to contain a count key") - monthlyCounts := monthlyInfo["counts"].(map[string]interface{}) - require.Contains(t, monthlyCounts, "non_entity_clients", "expected month[0].counts to contain a non_entity_clients key") - monthlyCountNonEntityRaw := monthlyCounts["non_entity_clients"].(json.Number) - monthlyCountNonEntity, err := monthlyCountNonEntityRaw.Int64() - require.NoError(t, err, "failed to parse number as int64: "+monthlyCountNonEntityRaw.String()) - require.Equal(t, count, monthlyCountNonEntity, "expected equal values for non entity client counts") - - // Validate this mount's namespace is included in the namespaces list, - // if this is enterprise. Otherwise, if its OSS or we don't have a - // namespace, we default to the value root. - mountNamespace := "" - mountPath := mount + "/" - if constants.IsEnterprise && strings.Contains(mount, "/") { - pieces := strings.Split(mount, "/") - require.Equal(t, 2, len(pieces), "we do not support nested namespaces in this test") - mountNamespace = pieces[0] + "/" - mountPath = pieces[1] + "/" - } - - require.Contains(t, monthlyInfo, "namespaces", "expected monthly info to contain a namespaces key") - monthlyNamespaces := monthlyInfo["namespaces"].([]interface{}) - foundNamespace := false - for index, namespaceRaw := range monthlyNamespaces { - namespace := namespaceRaw.(map[string]interface{}) - require.Contains(t, namespace, "namespace_path", "expected monthly.namespaces[%v] to contain a namespace_path key", index) - namespacePath := namespace["namespace_path"].(string) - - if namespacePath != mountNamespace { - t.Logf("skipping non-matching namespace %v: %v != %v / %v", index, namespacePath, mountNamespace, namespace) - continue - } - - foundNamespace = true - - // This namespace must 
have a non-empty aggregate non-entity count. - require.Contains(t, namespace, "counts", "expected monthly.namespaces[%v] to contain a counts key", index) - namespaceCounts := namespace["counts"].(map[string]interface{}) - require.Contains(t, namespaceCounts, "non_entity_clients", "expected namespace counts to contain a non_entity_clients key") - namespaceCountNonEntityRaw := namespaceCounts["non_entity_clients"].(json.Number) - namespaceCountNonEntity, err := namespaceCountNonEntityRaw.Int64() - require.NoError(t, err, "failed to parse number as int64: "+namespaceCountNonEntityRaw.String()) - require.Greater(t, namespaceCountNonEntity, int64(0), "expected at least one non-entity client count value in the namespace") - - require.Contains(t, namespace, "mounts", "expected monthly.namespaces[%v] to contain a mounts key", index) - namespaceMounts := namespace["mounts"].([]interface{}) - foundMount := false - for mountIndex, mountRaw := range namespaceMounts { - mountInfo := mountRaw.(map[string]interface{}) - require.Contains(t, mountInfo, "mount_path", "expected monthly.namespaces[%v].mounts[%v] to contain a mount_path key", index, mountIndex) - mountInfoPath := mountInfo["mount_path"].(string) - if mountPath != mountInfoPath { - t.Logf("skipping non-matching mount path %v in namespace %v: %v != %v / %v of %v", mountIndex, index, mountPath, mountInfoPath, mountInfo, namespace) - continue - } - - foundMount = true - - // This mount must also have a non-empty non-entity client count. 
- require.Contains(t, mountInfo, "counts", "expected monthly.namespaces[%v].mounts[%v] to contain a counts key", index, mountIndex) - mountCounts := mountInfo["counts"].(map[string]interface{}) - require.Contains(t, mountCounts, "non_entity_clients", "expected mount counts to contain a non_entity_clients key") - mountCountNonEntityRaw := mountCounts["non_entity_clients"].(json.Number) - mountCountNonEntity, err := mountCountNonEntityRaw.Int64() - require.NoError(t, err, "failed to parse number as int64: "+mountCountNonEntityRaw.String()) - require.Greater(t, mountCountNonEntity, int64(0), "expected at least one non-entity client count value in the mount") - } - - require.True(t, foundMount, "expected to find the mount "+mountPath+" in the list of mounts for namespace, but did not") - } - - require.True(t, foundNamespace, "expected to find the namespace "+mountNamespace+" in the list of namespaces, but did not") - - return count -} - -func doACMEForDomainWithDNS(t *testing.T, dns *dnstest.TestServer, acmeClient *acme.Client, domains []string) *x509.Certificate { - cr := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: domains[0]}, - DNSNames: domains, - } - - accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed to generate account key") - acmeClient.Key = accountKey - - testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute) - defer cancelFunc() - - // Register the client. 
- _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}}, func(tosURL string) bool { return true }) - require.NoError(t, err, "failed registering account") - - // Create the Order - var orderIdentifiers []acme.AuthzID - for _, domain := range domains { - orderIdentifiers = append(orderIdentifiers, acme.AuthzID{Type: "dns", Value: domain}) - } - order, err := acmeClient.AuthorizeOrder(testCtx, orderIdentifiers) - require.NoError(t, err, "failed creating ACME order") - - // Fetch its authorizations. - var auths []*acme.Authorization - for _, authUrl := range order.AuthzURLs { - authorization, err := acmeClient.GetAuthorization(testCtx, authUrl) - require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) - auths = append(auths, authorization) - } - - // For each dns-01 challenge, place the record in the associated DNS resolver. - var challengesToAccept []*acme.Challenge - for _, auth := range auths { - for _, challenge := range auth.Challenges { - if challenge.Status != acme.StatusPending { - t.Logf("ignoring challenge not in status pending: %v", challenge) - continue - } - - if challenge.Type == "dns-01" { - challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) - require.NoError(t, err, "failed generating challenge response") - - dns.AddRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) - defer dns.RemoveRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) - - require.NoError(t, err, "failed setting DNS record") - - challengesToAccept = append(challengesToAccept, challenge) - } - } - } - - dns.PushConfig() - require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") - - // Tell the ACME server, that they can now validate those challenges. 
- for _, challenge := range challengesToAccept { - _, err = acmeClient.Accept(testCtx, challenge) - require.NoError(t, err, "failed to accept challenge: %v", challenge) - } - - // Wait for the order/challenges to be validated. - _, err = acmeClient.WaitOrder(testCtx, order.URI) - require.NoError(t, err, "failed waiting for order to be ready") - - // Create/sign the CSR and ask ACME server to sign it returning us the final certificate - csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey) - require.NoError(t, err, "failed generating csr") - - certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false) - require.NoError(t, err, "failed to get a certificate back from ACME") - - acmeCert, err := x509.ParseCertificate(certs[0]) - require.NoError(t, err, "failed parsing acme cert bytes") - - return acmeCert -} diff --git a/builtin/logical/pki/acme_challenge_engine.go b/builtin/logical/pki/acme_challenge_engine.go deleted file mode 100644 index 5103a0376e7e9..0000000000000 --- a/builtin/logical/pki/acme_challenge_engine.go +++ /dev/null @@ -1,535 +0,0 @@ -package pki - -import ( - "container/list" - "context" - "fmt" - "sync" - "time" - - "github.com/hashicorp/vault/sdk/logical" -) - -var MaxChallengeTimeout = 1 * time.Minute - -const MaxRetryAttempts = 5 - -const ChallengeAttemptFailedMsg = "this may occur if the validation target was misconfigured: check that challenge responses are available at the required locations and retry." - -type ChallengeValidation struct { - // Account KID that this validation attempt is recorded under. - Account string `json:"account"` - - // The authorization ID that this validation attempt is for. - Authorization string `json:"authorization"` - ChallengeType ACMEChallengeType `json:"challenge_type"` - - // The token of this challenge and the JWS thumbprint of the account - // we're validating against. 
- Token string `json:"token"` - Thumbprint string `json:"thumbprint"` - - Initiated time.Time `json:"initiated"` - FirstValidation time.Time `json:"first_validation,omitempty"` - RetryCount int `json:"retry_count,omitempty"` - LastRetry time.Time `json:"last_retry,omitempty"` - RetryAfter time.Time `json:"retry_after,omitempty"` -} - -type ChallengeQueueEntry struct { - Identifier string - RetryAfter time.Time -} - -type ACMEChallengeEngine struct { - NumWorkers int - - ValidationLock sync.Mutex - NewValidation chan string - Closing chan struct{} - Validations *list.List -} - -func NewACMEChallengeEngine() *ACMEChallengeEngine { - ace := &ACMEChallengeEngine{} - ace.NewValidation = make(chan string, 1) - ace.Closing = make(chan struct{}, 1) - ace.Validations = list.New() - ace.NumWorkers = 5 - - return ace -} - -func (ace *ACMEChallengeEngine) LoadFromStorage(b *backend, sc *storageContext) error { - items, err := sc.Storage.List(sc.Context, acmeValidationPrefix) - if err != nil { - return fmt.Errorf("failed loading list of validations from disk: %w", err) - } - - ace.ValidationLock.Lock() - defer ace.ValidationLock.Unlock() - - // Add them to our queue of validations to work through later. - foundExistingValidations := false - for _, item := range items { - ace.Validations.PushBack(&ChallengeQueueEntry{ - Identifier: item, - }) - foundExistingValidations = true - } - - if foundExistingValidations { - ace.NewValidation <- "existing" - } - - return nil -} - -func (ace *ACMEChallengeEngine) Run(b *backend, state *acmeState, sc *storageContext) { - // We load the existing ACME challenges within the Run thread to avoid - // delaying the PKI mount initialization - b.Logger().Debug("Loading existing challenge validations on disk") - err := ace.LoadFromStorage(b, sc) - if err != nil { - b.Logger().Error("failed loading existing ACME challenge validations:", "err", err) - } - - for true { - // err == nil on shutdown. 
- b.Logger().Debug("Starting ACME challenge validation engine") - err := ace._run(b, state) - if err != nil { - b.Logger().Error("Got unexpected error from ACME challenge validation engine", "err", err) - time.Sleep(1 * time.Second) - continue - } - break - } -} - -func (ace *ACMEChallengeEngine) _run(b *backend, state *acmeState) error { - // This runner uses a background context for storage operations: we don't - // want to tie it to a inbound request and we don't want to set a time - // limit, so create a fresh background context. - runnerSC := b.makeStorageContext(context.Background(), b.storage) - - // We want at most a certain number of workers operating to verify - // challenges. - var finishedWorkersChannels []chan bool - for true { - // Wait until we've got more work to do. - select { - case <-ace.Closing: - b.Logger().Debug("shutting down ACME challenge validation engine") - return nil - case <-ace.NewValidation: - } - - // First try to reap any finished workers. Read from their channels - // and if not finished yet, add to a fresh slice. - var newFinishedWorkersChannels []chan bool - for _, channel := range finishedWorkersChannels { - select { - case <-channel: - default: - // This channel had not been written to, indicating that the - // worker had not yet finished. - newFinishedWorkersChannels = append(newFinishedWorkersChannels, channel) - } - } - finishedWorkersChannels = newFinishedWorkersChannels - - // If we have space to take on another work item, do so. - firstIdentifier := "" - startedWork := false - now := time.Now() - for len(finishedWorkersChannels) < ace.NumWorkers { - var task *ChallengeQueueEntry - - // Find our next work item. We do all of these operations - // while holding the queue lock, hence some repeated checks - // afterwards. Out of this, we get a candidate task, using - // element == nil as a sentinel for breaking our parent - // loop. 
- ace.ValidationLock.Lock() - element := ace.Validations.Front() - if element != nil { - ace.Validations.Remove(element) - task = element.Value.(*ChallengeQueueEntry) - if !task.RetryAfter.IsZero() && now.Before(task.RetryAfter) { - // We cannot work on this element yet; remove it to - // the back of the queue. This allows us to potentially - // select the next item in the next iteration. - ace.Validations.PushBack(task) - } - - if firstIdentifier != "" && task.Identifier == firstIdentifier { - // We found and rejected this element before; exit the - // loop by "claiming" we didn't find any work. - element = nil - } else if firstIdentifier == "" { - firstIdentifier = task.Identifier - } - } - ace.ValidationLock.Unlock() - if element == nil { - // There was no more work to do to fill up the queue; exit - // this loop. - break - } - if now.Before(task.RetryAfter) { - // Here, while we found an element, we didn't want to - // completely exit the loop (perhaps it was our first time - // finding a work order), so retry without modifying - // firstIdentifier. - continue - } - - config, err := state.getConfigWithUpdate(runnerSC) - if err != nil { - return fmt.Errorf("failed fetching ACME configuration: %w", err) - } - - // Since this work item was valid, we won't expect to see it in - // the validation queue again until it is executed. Here, we - // want to avoid infinite looping above (if we removed the one - // valid item and the remainder are all not immediately - // actionable). At the worst, we'll spend a little more time - // looping through the queue until we hit a repeat. - firstIdentifier = "" - - // Here, we got a piece of work that is ready to check; create a - // channel and a new go routine and run it. Note that this still - // could have a RetryAfter date we're not aware of (e.g., if the - // cluster restarted as we do not read the entries there). 
- channel := make(chan bool, 1) - go ace.VerifyChallenge(runnerSC, task.Identifier, channel, config) - finishedWorkersChannels = append(finishedWorkersChannels, channel) - startedWork = true - } - - // If we have no more capacity for work, we should pause a little to - // let the system catch up. Additionally, if we only had - // non-actionable work items, we should pause until some time has - // elapsed: not too much that we potentially starve any new incoming - // items from validation, but not too short that we cause a busy loop. - if len(finishedWorkersChannels) == ace.NumWorkers || !startedWork { - time.Sleep(100 * time.Millisecond) - } - - // Lastly, if we have more work to do, re-trigger ourselves. - ace.ValidationLock.Lock() - if ace.Validations.Front() != nil { - select { - case ace.NewValidation <- "retry": - default: - } - } - ace.ValidationLock.Unlock() - } - - return fmt.Errorf("unexpectedly exited from ACMEChallengeEngine._run()") -} - -func (ace *ACMEChallengeEngine) AcceptChallenge(sc *storageContext, account string, authz *ACMEAuthorization, challenge *ACMEChallenge, thumbprint string) error { - name := authz.Id + "-" + string(challenge.Type) - path := acmeValidationPrefix + name - - entry, err := sc.Storage.Get(sc.Context, path) - if err == nil && entry != nil { - // Challenge already in the queue; exit without re-adding it. - return nil - } - - if authz.Status != ACMEAuthorizationPending { - return fmt.Errorf("%w: cannot accept already validated authorization %v (%v)", ErrMalformed, authz.Id, authz.Status) - } - - for _, otherChallenge := range authz.Challenges { - // We assume within an authorization we won't have multiple challenges of the same challenge type - // and we want to limit a single challenge being in a processing state to avoid race conditions - // failing one challenge and passing another. 
- if otherChallenge.Type != challenge.Type && otherChallenge.Status != ACMEChallengePending { - return fmt.Errorf("%w: only a single challenge within an authorization can be accepted (%v) in status %v", ErrMalformed, otherChallenge.Type, otherChallenge.Status) - } - - // The requested challenge can ping us to wake us up, so allow pending and currently processing statuses - if otherChallenge.Status != ACMEChallengePending && otherChallenge.Status != ACMEChallengeProcessing { - return fmt.Errorf("%w: challenge is in invalid state (%v) in authorization %v", ErrMalformed, challenge.Status, authz.Id) - } - } - - token := challenge.ChallengeFields["token"].(string) - - cv := &ChallengeValidation{ - Account: account, - Authorization: authz.Id, - ChallengeType: challenge.Type, - Token: token, - Thumbprint: thumbprint, - Initiated: time.Now(), - } - - json, err := logical.StorageEntryJSON(path, &cv) - if err != nil { - return fmt.Errorf("error creating challenge validation queue entry: %w", err) - } - - if err := sc.Storage.Put(sc.Context, json); err != nil { - return fmt.Errorf("error writing challenge validation entry: %w", err) - } - - if challenge.Status == ACMEChallengePending { - challenge.Status = ACMEChallengeProcessing - - authzPath := getAuthorizationPath(account, authz.Id) - if err := saveAuthorizationAtPath(sc, authzPath, authz); err != nil { - return fmt.Errorf("error saving updated authorization %v: %w", authz.Id, err) - } - } - - ace.ValidationLock.Lock() - defer ace.ValidationLock.Unlock() - ace.Validations.PushBack(&ChallengeQueueEntry{ - Identifier: name, - }) - - select { - case ace.NewValidation <- name: - default: - } - - return nil -} - -func (ace *ACMEChallengeEngine) VerifyChallenge(runnerSc *storageContext, id string, finished chan bool, config *acmeConfigEntry) { - sc, _ /* cancel func */ := runnerSc.WithFreshTimeout(MaxChallengeTimeout) - runnerSc.Backend.Logger().Debug("Starting verification of challenge", "id", id) - - if retry, retryAfter, err 
:= ace._verifyChallenge(sc, id, config); err != nil { - // Because verification of this challenge failed, we need to retry - // it in the future. Log the error and re-add the item to the queue - // to try again later. - sc.Backend.Logger().Error(fmt.Sprintf("ACME validation failed for %v: %v", id, err)) - - if retry { - ace.ValidationLock.Lock() - defer ace.ValidationLock.Unlock() - ace.Validations.PushBack(&ChallengeQueueEntry{ - Identifier: id, - RetryAfter: retryAfter, - }) - - // Let the validator know there's a pending challenge. - select { - case ace.NewValidation <- id: - default: - } - } - - // We're the only producer on this channel and it has a buffer size - // of one element, so it is safe to directly write here. - finished <- true - return - } - - // We're the only producer on this channel and it has a buffer size of one - // element, so it is safe to directly write here. - finished <- false -} - -func (ace *ACMEChallengeEngine) _verifyChallenge(sc *storageContext, id string, config *acmeConfigEntry) (bool, time.Time, error) { - now := time.Now() - path := acmeValidationPrefix + id - challengeEntry, err := sc.Storage.Get(sc.Context, path) - if err != nil { - return true, now, fmt.Errorf("error loading challenge %v: %w", id, err) - } - - if challengeEntry == nil { - // Something must've successfully cleaned up our storage entry from - // under us. Assume we don't need to rerun, else the client will - // trigger us to re-run. 
- err = nil - return ace._verifyChallengeCleanup(sc, err, id) - } - - var cv *ChallengeValidation - if err := challengeEntry.DecodeJSON(&cv); err != nil { - return true, now, fmt.Errorf("error decoding challenge %v: %w", id, err) - } - - if now.Before(cv.RetryAfter) { - return true, cv.RetryAfter, fmt.Errorf("retrying challenge %v too soon", id) - } - - authzPath := getAuthorizationPath(cv.Account, cv.Authorization) - authz, err := loadAuthorizationAtPath(sc, authzPath) - if err != nil { - return true, now, fmt.Errorf("error loading authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err) - } - - if authz.Status != ACMEAuthorizationPending { - // Something must've finished up this challenge for us. Assume we - // don't need to rerun and exit instead. - err = nil - return ace._verifyChallengeCleanup(sc, err, id) - } - - var challenge *ACMEChallenge - for _, authzChallenge := range authz.Challenges { - if authzChallenge.Type == cv.ChallengeType { - challenge = authzChallenge - break - } - } - - if challenge == nil { - err = fmt.Errorf("no challenge of type %v in authorization %v/%v for challenge %v", cv.ChallengeType, cv.Account, cv.Authorization, id) - return ace._verifyChallengeCleanup(sc, err, id) - } - - if challenge.Status != ACMEChallengePending && challenge.Status != ACMEChallengeProcessing { - err = fmt.Errorf("challenge is in invalid state %v in authorization %v/%v for challenge %v", challenge.Status, cv.Account, cv.Authorization, id) - return ace._verifyChallengeCleanup(sc, err, id) - } - - var valid bool - switch challenge.Type { - case ACMEHTTPChallenge: - if authz.Identifier.Type != ACMEDNSIdentifier && authz.Identifier.Type != ACMEIPIdentifier { - err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) - return ace._verifyChallengeCleanup(sc, err, id) - } - - if authz.Wildcard { - err = fmt.Errorf("unable to validate wildcard 
authorization %v/%v in challenge %v via http-01 challenge", cv.Account, cv.Authorization, id) - return ace._verifyChallengeCleanup(sc, err, id) - } - - valid, err = ValidateHTTP01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) - if err != nil { - err = fmt.Errorf("%w: error validating http-01 challenge %v: %v; %v", ErrIncorrectResponse, id, err, ChallengeAttemptFailedMsg) - return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) - } - case ACMEDNSChallenge: - if authz.Identifier.Type != ACMEDNSIdentifier { - err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) - return ace._verifyChallengeCleanup(sc, err, id) - } - - valid, err = ValidateDNS01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) - if err != nil { - err = fmt.Errorf("%w: error validating dns-01 challenge %v: %v; %v", ErrIncorrectResponse, id, err, ChallengeAttemptFailedMsg) - return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) - } - case ACMEALPNChallenge: - if authz.Identifier.Type != ACMEDNSIdentifier { - err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) - return ace._verifyChallengeCleanup(sc, err, id) - } - - if authz.Wildcard { - err = fmt.Errorf("unable to validate wildcard authorization %v/%v in challenge %v via tls-alpn-01 challenge", cv.Account, cv.Authorization, id) - return ace._verifyChallengeCleanup(sc, err, id) - } - - valid, err = ValidateTLSALPN01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) - if err != nil { - err = fmt.Errorf("%w: error validating tls-alpn-01 challenge %v: %s", ErrIncorrectResponse, id, err.Error()) - return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) - } - default: - err = fmt.Errorf("unsupported ACME challenge type %v for challenge %v", 
cv.ChallengeType, id) - return ace._verifyChallengeCleanup(sc, err, id) - } - - if !valid { - err = fmt.Errorf("%w: challenge failed with no additional information", ErrIncorrectResponse) - return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) - } - - // If we got here, the challenge verification was successful. Update - // the authorization appropriately. - expires := now.Add(15 * 24 * time.Hour) - challenge.Status = ACMEChallengeValid - challenge.Validated = now.Format(time.RFC3339) - challenge.Error = nil - authz.Status = ACMEAuthorizationValid - authz.Expires = expires.Format(time.RFC3339) - - if err := saveAuthorizationAtPath(sc, authzPath, authz); err != nil { - err = fmt.Errorf("error saving updated (validated) authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err) - return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) - } - - return ace._verifyChallengeCleanup(sc, nil, id) -} - -func (ace *ACMEChallengeEngine) _verifyChallengeRetry(sc *storageContext, cv *ChallengeValidation, authzPath string, auth *ACMEAuthorization, challenge *ACMEChallenge, verificationErr error, id string) (bool, time.Time, error) { - now := time.Now() - path := acmeValidationPrefix + id - - if err := updateChallengeStatus(sc, cv, authzPath, auth, challenge, verificationErr); err != nil { - return true, now, err - } - - if cv.RetryCount > MaxRetryAttempts { - err := fmt.Errorf("reached max error attempts for challenge %v: %w", id, verificationErr) - return ace._verifyChallengeCleanup(sc, err, id) - } - - if cv.FirstValidation.IsZero() { - cv.FirstValidation = now - } - cv.RetryCount += 1 - cv.LastRetry = now - cv.RetryAfter = now.Add(time.Duration(cv.RetryCount*5) * time.Second) - - json, jsonErr := logical.StorageEntryJSON(path, cv) - if jsonErr != nil { - return true, now, fmt.Errorf("error persisting updated challenge validation queue entry (error prior to retry, if any: %v): %w", verificationErr, jsonErr) 
- } - - if putErr := sc.Storage.Put(sc.Context, json); putErr != nil { - return true, now, fmt.Errorf("error writing updated challenge validation entry (error prior to retry, if any: %v): %w", verificationErr, putErr) - } - - if verificationErr != nil { - verificationErr = fmt.Errorf("retrying validation: %w", verificationErr) - } - - return true, cv.RetryAfter, verificationErr -} - -func updateChallengeStatus(sc *storageContext, cv *ChallengeValidation, authzPath string, auth *ACMEAuthorization, challenge *ACMEChallenge, verificationErr error) error { - if verificationErr != nil { - challengeError := TranslateErrorToErrorResponse(verificationErr) - challenge.Error = challengeError.MarshalForStorage() - } - - if cv.RetryCount > MaxRetryAttempts { - challenge.Status = ACMEChallengeInvalid - auth.Status = ACMEAuthorizationInvalid - } - - if err := saveAuthorizationAtPath(sc, authzPath, auth); err != nil { - return fmt.Errorf("error persisting authorization/challenge update: %w", err) - } - return nil -} - -func (ace *ACMEChallengeEngine) _verifyChallengeCleanup(sc *storageContext, err error, id string) (bool, time.Time, error) { - now := time.Now() - - // Remove our ChallengeValidation entry only. 
- if deleteErr := sc.Storage.Delete(sc.Context, acmeValidationPrefix+id); deleteErr != nil { - return true, now.Add(-1 * time.Second), fmt.Errorf("error deleting challenge %v (error prior to cleanup, if any: %v): %w", id, err, deleteErr) - } - - if err != nil { - err = fmt.Errorf("removing challenge validation attempt and not retrying %v; previous error: %w", id, err) - } - - return false, now, err -} diff --git a/builtin/logical/pki/acme_challenges.go b/builtin/logical/pki/acme_challenges.go deleted file mode 100644 index 855035729e97e..0000000000000 --- a/builtin/logical/pki/acme_challenges.go +++ /dev/null @@ -1,469 +0,0 @@ -package pki - -import ( - "bytes" - "context" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "fmt" - "io" - "net" - "net/http" - "strings" - "time" -) - -const ( - DNSChallengePrefix = "_acme-challenge." - ALPNProtocol = "acme-tls/1" -) - -// While this should be a constant, there's no way to do a low-level test of -// ValidateTLSALPN01Challenge without spinning up a complicated Docker -// instance to build a custom responder. Because we already have a local -// toolchain, it is far easier to drive this through Go tests with a custom -// (high) port, rather than requiring permission to bind to port 443 (root-run -// tests are even worse). -var ALPNPort = "443" - -// OID of the acmeIdentifier X.509 Certificate Extension. -var OIDACMEIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31} - -// ValidateKeyAuthorization validates that the given keyAuthz from a challenge -// matches our expectation, returning (true, nil) if so, or (false, err) if -// not. 
-func ValidateKeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { - parts := strings.Split(keyAuthz, ".") - if len(parts) != 2 { - return false, fmt.Errorf("invalid authorization: got %v parts, expected 2", len(parts)) - } - - tokenPart := parts[0] - thumbprintPart := parts[1] - - if token != tokenPart || thumbprint != thumbprintPart { - return false, fmt.Errorf("key authorization was invalid") - } - - return true, nil -} - -// ValidateSHA256KeyAuthorization validates that the given keyAuthz from a -// challenge matches our expectation, returning (true, nil) if so, or -// (false, err) if not. -// -// This is for use with DNS challenges, which require -func ValidateSHA256KeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { - authzContents := token + "." + thumbprint - checksum := sha256.Sum256([]byte(authzContents)) - expectedAuthz := base64.RawURLEncoding.EncodeToString(checksum[:]) - - if keyAuthz != expectedAuthz { - return false, fmt.Errorf("sha256 key authorization was invalid") - } - - return true, nil -} - -func buildResolver(config *acmeConfigEntry) (*net.Resolver, error) { - if len(config.DNSResolver) == 0 { - return net.DefaultResolver, nil - } - - return &net.Resolver{ - PreferGo: true, - StrictErrors: false, - Dial: func(ctx context.Context, network, address string) (net.Conn, error) { - d := net.Dialer{ - Timeout: 10 * time.Second, - } - return d.DialContext(ctx, network, config.DNSResolver) - }, - }, nil -} - -func buildDialerConfig(config *acmeConfigEntry) (*net.Dialer, error) { - resolver, err := buildResolver(config) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %w", err) - } - - return &net.Dialer{ - Timeout: 10 * time.Second, - KeepAlive: -1 * time.Second, - Resolver: resolver, - }, nil -} - -// Validates a given ACME http-01 challenge against the specified domain, -// per RFC 8555. -// -// We attempt to be defensive here against timeouts, extra redirects, &c. 
-func ValidateHTTP01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { - path := "http://" + domain + "/.well-known/acme-challenge/" + token - dialer, err := buildDialerConfig(config) - if err != nil { - return false, fmt.Errorf("failed to build dialer: %w", err) - } - - transport := &http.Transport{ - // Only a single request is sent to this server as we do not do any - // batching of validation attempts. There is no need to do an HTTP - // KeepAlive as a result. - DisableKeepAlives: true, - MaxIdleConns: 1, - MaxIdleConnsPerHost: 1, - MaxConnsPerHost: 1, - IdleConnTimeout: 1 * time.Second, - - // We'd rather timeout and re-attempt validation later than hang - // too many validators waiting for slow hosts. - DialContext: dialer.DialContext, - ResponseHeaderTimeout: 10 * time.Second, - } - - maxRedirects := 10 - urlLength := 2000 - - client := &http.Client{ - Transport: transport, - CheckRedirect: func(req *http.Request, via []*http.Request) error { - if len(via)+1 >= maxRedirects { - return fmt.Errorf("http-01: too many redirects: %v", len(via)+1) - } - - reqUrlLen := len(req.URL.String()) - if reqUrlLen > urlLength { - return fmt.Errorf("http-01: redirect url length too long: %v", reqUrlLen) - } - - return nil - }, - } - - resp, err := client.Get(path) - if err != nil { - return false, fmt.Errorf("http-01: failed to fetch path %v: %w", path, err) - } - - // We provision a buffer which allows for a variable size challenge, some - // whitespace, and a detection gap for too long of a message. - minExpected := len(token) + 1 + len(thumbprint) - maxExpected := 512 - - defer resp.Body.Close() - - // Attempt to read the body, but don't do so infinitely. 
- body, err := io.ReadAll(io.LimitReader(resp.Body, int64(maxExpected+1))) - if err != nil { - return false, fmt.Errorf("http-01: unexpected error while reading body: %w", err) - } - - if len(body) > maxExpected { - return false, fmt.Errorf("http-01: response too large: received %v > %v bytes", len(body), maxExpected) - } - - if len(body) < minExpected { - return false, fmt.Errorf("http-01: response too small: received %v < %v bytes", len(body), minExpected) - } - - // Per RFC 8555 Section 8.3. HTTP Challenge: - // - // > The server SHOULD ignore whitespace characters at the end of the body. - keyAuthz := string(body) - keyAuthz = strings.TrimSpace(keyAuthz) - - // If we got here, we got no non-EOF error while reading. Try to validate - // the token because we're bounded by a reasonable amount of length. - return ValidateKeyAuthorization(keyAuthz, token, thumbprint) -} - -func ValidateDNS01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { - // Here, domain is the value from the post-wildcard-processed identifier. - // Per RFC 8555, no difference in validation occurs if a wildcard entry - // is requested or if a non-wildcard entry is requested. - // - // XXX: In this case the DNS server is operator controlled and is assumed - // to be less malicious so the default resolver is used. In the future, - // we'll want to use net.Resolver for two reasons: - // - // 1. To control the actual resolver via ACME configuration, - // 2. To use a context to set stricter timeout limits. 
- resolver, err := buildResolver(config) - if err != nil { - return false, fmt.Errorf("failed to build resolver: %w", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - name := DNSChallengePrefix + domain - results, err := resolver.LookupTXT(ctx, name) - if err != nil { - return false, fmt.Errorf("dns-01: failed to lookup TXT records for domain (%v) via resolver %v: %w", name, config.DNSResolver, err) - } - - for _, keyAuthz := range results { - ok, _ := ValidateSHA256KeyAuthorization(keyAuthz, token, thumbprint) - if ok { - return true, nil - } - } - - return false, fmt.Errorf("dns-01: challenge failed against %v records", len(results)) -} - -func ValidateTLSALPN01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { - // This RFC is defined in RFC 8737 Automated Certificate Management - // Environment (ACME) TLS Application‑Layer Protocol Negotiation - // (ALPN) Challenge Extension. - // - // This is conceptually similar to ValidateHTTP01Challenge, but - // uses a TLS connection on port 443 with the specified ALPN - // protocol. - - cfg := &tls.Config{ - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge, the name of the negotiated - // protocol is "acme-tls/1". - NextProtos: []string{ALPNProtocol}, - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > ... and an SNI extension containing only the domain name - // > being validated during the TLS handshake. - // - // According to the Go docs, setting this option (even though - // InsecureSkipVerify=true is also specified), allows us to - // set the SNI extension to this value. - ServerName: domain, - - VerifyConnection: func(connState tls.ConnectionState) error { - // We initiated a fresh connection with no session tickets; - // even if we did have a session ticket, we do not wish to - // use it. 
Verify that the server has not inadvertently - // reused connections between validation attempts or something. - if connState.DidResume { - return fmt.Errorf("server under test incorrectly reported that handshake was resumed when no session cache was provided; refusing to continue") - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The ACME server verifies that during the TLS handshake the - // > application-layer protocol "acme-tls/1" was successfully - // > negotiated (and that the ALPN extension contained only the - // > value "acme-tls/1"). - if connState.NegotiatedProtocol != ALPNProtocol { - return fmt.Errorf("server under test negotiated unexpected ALPN protocol %v", connState.NegotiatedProtocol) - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > and that the certificate returned - // - // Because this certificate MUST be self-signed (per earlier - // statement in RFC 8737 Section 3), there is no point in sending - // more than one certificate, and so we will err early here if - // we got more than one. - if len(connState.PeerCertificates) > 1 { - return fmt.Errorf("server under test returned multiple (%v) certificates when we expected only one", len(connState.PeerCertificates)) - } - cert := connState.PeerCertificates[0] - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The client prepares for validation by constructing a - // > self-signed certificate that MUST contain an acmeIdentifier - // > extension and a subjectAlternativeName extension [RFC5280]. - // - // Verify that this is a self-signed certificate that isn't signed - // by another certificate (i.e., with the same key material but - // different issuer). 
- if err := cert.CheckSignatureFrom(cert); err != nil { - return fmt.Errorf("server under test returned a non-self-signed certificate: %w", err) - } - if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { - return fmt.Errorf("server under test returned a non-self-signed certificate: invalid subject (%v) <-> issuer (%v) match", cert.Subject.String(), cert.Issuer.String()) - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The subjectAlternativeName extension MUST contain a single - // > dNSName entry where the value is the domain name being - // > validated. - // - // TODO: this does not validate that there are not other SANs - // with unknown (to Go) OIDs. - if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) > 0 || len(cert.IPAddresses) > 0 || len(cert.URIs) > 0 { - return fmt.Errorf("server under test returned a certificate with incorrect SANs") - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The comparison of dNSNames MUST be case insensitive - // > [RFC4343]. Note that as ACME doesn't support Unicode - // > identifiers, all dNSNames MUST be encoded using the rules - // > of [RFC3492]. - if !strings.EqualFold(cert.DNSNames[0], domain) { - return fmt.Errorf("server under test returned a certificate with unexpected identifier: %v", cert.DNSNames[0]) - } - - // Per above, verify that the acmeIdentifier extension is present - // exactly once and has the correct value. - var foundACMEId bool - for _, ext := range cert.Extensions { - if !ext.Id.Equal(OIDACMEIdentifier) { - continue - } - - // There must be only a single ACME extension. - if foundACMEId { - return fmt.Errorf("server under test returned a certificate with multiple acmeIdentifier extensions") - } - foundACMEId = true - - // Per RFC 8737 Section 3. 
TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > a critical acmeIdentifier extension - if !ext.Critical { - return fmt.Errorf("server under test returned a certificate with an acmeIdentifier extension marked non-Critical") - } - - keyAuthz := string(ext.Value) - ok, err := ValidateSHA256KeyAuthorization(keyAuthz, token, thumbprint) - if !ok || err != nil { - return fmt.Errorf("server under test returned a certificate with an invalid key authorization (%w)", err) - } - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The ACME server verifies that ... the certificate returned - // > contains: ... a critical acmeIdentifier extension containing - // > the expected SHA-256 digest computed in step 1. - if !foundACMEId { - return fmt.Errorf("server under test returned a certificate without the required acmeIdentifier extension") - } - - // Remove the handled critical extension and validate that we - // have no additional critical extensions left unhandled. - var index int = -1 - for oidIndex, oid := range cert.UnhandledCriticalExtensions { - if oid.Equal(OIDACMEIdentifier) { - index = oidIndex - break - } - } - if index != -1 { - // Unlike the foundACMEId case, this is not a failure; if Go - // updates to "understand" this critical extension, we do not - // wish to fail. - cert.UnhandledCriticalExtensions = append(cert.UnhandledCriticalExtensions[0:index], cert.UnhandledCriticalExtensions[index+1:]...) - } - if len(cert.UnhandledCriticalExtensions) > 0 { - return fmt.Errorf("server under test returned a certificate with additional unknown critical extensions (%v)", cert.UnhandledCriticalExtensions) - } - - // All good! - return nil - }, - - // We never want to resume a connection; do not provide session - // cache storage. 
- ClientSessionCache: nil, - - // Do not trust any system trusted certificates; we're going to be - // manually validating the chain, so specifying a non-empty pool - // here could only cause additional, unnecessary work. - RootCAs: x509.NewCertPool(), - - // Do not bother validating the client's chain; we know it should be - // self-signed. This also disables hostname verification, but we do - // this verification as part of VerifyConnection(...) ourselves. - // - // Per Go docs, this option is only safe in conjunction with - // VerifyConnection which we define above. - InsecureSkipVerify: true, - - // RFC 8737 Section 4. acme-tls/1 Protocol Definition: - // - // > ACME servers that implement "acme-tls/1" MUST only negotiate - // > TLS 1.2 [RFC5246] or higher when connecting to clients for - // > validation. - MinVersion: tls.VersionTLS12, - - // While RFC 8737 does not place restrictions around allowed cipher - // suites, we wish to restrict ourselves to secure defaults. Specify - // the Intermediate guideline from Mozilla's TLS config generator to - // disable obviously weak ciphers. - // - // See also: https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.7 - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - }, - } - - // Build a dialer using our custom DNS resolver, to ensure domains get - // resolved according to configuration. - dialer, err := buildDialerConfig(config) - if err != nil { - return false, fmt.Errorf("failed to build dialer: %w", err) - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > 2. 
The ACME server resolves the domain name being validated and - // > chooses one of the IP addresses returned for validation (the - // > server MAY validate against multiple addresses if more than - // > one is returned). - // > 3. The ACME server initiates a TLS connection to the chosen IP - // > address. This connection MUST use TCP port 443. - address := fmt.Sprintf("%v:"+ALPNPort, domain) - conn, err := dialer.Dial("tcp", address) - if err != nil { - return false, fmt.Errorf("tls-alpn-01: failed to dial host: %w", err) - } - - // Initiate the connection to the remote peer. - client := tls.Client(conn, cfg) - - // We intentionally swallow this error as it isn't useful to the - // underlying protocol we perform here. Notably, per RFC 8737 - // Section 4. acme-tls/1 Protocol Definition: - // - // > Once the handshake is completed, the client MUST NOT exchange - // > any further data with the server and MUST immediately close the - // > connection. ... Because of this, an ACME server MAY choose to - // > withhold authorization if either the certificate signature is - // > invalid or the handshake doesn't fully complete. - defer client.Close() - - // We wish to put time bounds on the total time the handshake can - // stall for, so build a connection context here. - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - // See note above about why we can allow Handshake to complete - // successfully. 
- if err := client.HandshakeContext(ctx); err != nil { - return false, fmt.Errorf("tls-alpn-01: failed to perform handshake: %w", err) - } - return true, nil -} diff --git a/builtin/logical/pki/acme_challenges_test.go b/builtin/logical/pki/acme_challenges_test.go deleted file mode 100644 index 3bcdf88141d79..0000000000000 --- a/builtin/logical/pki/acme_challenges_test.go +++ /dev/null @@ -1,703 +0,0 @@ -package pki - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "fmt" - "math/big" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/builtin/logical/pki/dnstest" - - "github.com/stretchr/testify/require" -) - -type keyAuthorizationTestCase struct { - keyAuthz string - token string - thumbprint string - shouldFail bool -} - -var keyAuthorizationTestCases = []keyAuthorizationTestCase{ - { - // Entirely empty - "", - "non-empty-token", - "non-empty-thumbprint", - true, - }, - { - // Both empty - ".", - "non-empty-token", - "non-empty-thumbprint", - true, - }, - { - // Not equal - "non-.non-", - "non-empty-token", - "non-empty-thumbprint", - true, - }, - { - // Empty thumbprint - "non-.", - "non-empty-token", - "non-empty-thumbprint", - true, - }, - { - // Empty token - ".non-", - "non-empty-token", - "non-empty-thumbprint", - true, - }, - { - // Wrong order - "non-empty-thumbprint.non-empty-token", - "non-empty-token", - "non-empty-thumbprint", - true, - }, - { - // Too many pieces - "one.two.three", - "non-empty-token", - "non-empty-thumbprint", - true, - }, - { - // Valid - "non-empty-token.non-empty-thumbprint", - "non-empty-token", - "non-empty-thumbprint", - false, - }, -} - -func TestAcmeValidateKeyAuthorization(t *testing.T) { - t.Parallel() - - for index, tc := range keyAuthorizationTestCases { - isValid, err := ValidateKeyAuthorization(tc.keyAuthz, tc.token, tc.thumbprint) - if 
!isValid && err == nil { - t.Fatalf("[%d] expected failure to give reason via err (%v / %v)", index, isValid, err) - } - - expectedValid := !tc.shouldFail - if expectedValid != isValid { - t.Fatalf("[%d] got ret=%v, expected ret=%v (shouldFail=%v)", index, isValid, expectedValid, tc.shouldFail) - } - } -} - -func TestAcmeValidateHTTP01Challenge(t *testing.T) { - t.Parallel() - - for index, tc := range keyAuthorizationTestCases { - validFunc := func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(tc.keyAuthz)) - } - withPadding := func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(" " + tc.keyAuthz + " ")) - } - withRedirect := func(w http.ResponseWriter, r *http.Request) { - if strings.Contains(r.URL.Path, "/.well-known/") { - http.Redirect(w, r, "/my-http-01-challenge-response", 301) - return - } - - w.Write([]byte(tc.keyAuthz)) - } - withSleep := func(w http.ResponseWriter, r *http.Request) { - // Long enough to ensure any excessively short timeouts are hit, - // not long enough to trigger a failure (hopefully). - time.Sleep(5 * time.Second) - w.Write([]byte(tc.keyAuthz)) - } - - validHandlers := []http.HandlerFunc{ - http.HandlerFunc(validFunc), http.HandlerFunc(withPadding), - http.HandlerFunc(withRedirect), http.HandlerFunc(withSleep), - } - - for handlerIndex, handler := range validHandlers { - func() { - ts := httptest.NewServer(handler) - defer ts.Close() - - host := ts.URL[7:] - isValid, err := ValidateHTTP01Challenge(host, tc.token, tc.thumbprint, &acmeConfigEntry{}) - if !isValid && err == nil { - t.Fatalf("[tc=%d/handler=%d] expected failure to give reason via err (%v / %v)", index, handlerIndex, isValid, err) - } - - expectedValid := !tc.shouldFail - if expectedValid != isValid { - t.Fatalf("[tc=%d/handler=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, handlerIndex, isValid, err, expectedValid, tc.shouldFail) - } - }() - } - } - - // Negative test cases for various HTTP-specific scenarios. 
- redirectLoop := func(w http.ResponseWriter, r *http.Request) { - http.Redirect(w, r, "/my-http-01-challenge-response", 301) - } - publicRedirect := func(w http.ResponseWriter, r *http.Request) { - http.Redirect(w, r, "http://hashicorp.com/", 301) - } - noData := func(w http.ResponseWriter, r *http.Request) {} - noContent := func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNoContent) - } - notFound := func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - } - simulateHang := func(w http.ResponseWriter, r *http.Request) { - time.Sleep(30 * time.Second) - w.Write([]byte("my-token.my-thumbprint")) - } - tooLarge := func(w http.ResponseWriter, r *http.Request) { - for i := 0; i < 512; i++ { - w.Write([]byte("my-token.my-thumbprint\n")) - } - } - - validHandlers := []http.HandlerFunc{ - http.HandlerFunc(redirectLoop), http.HandlerFunc(publicRedirect), - http.HandlerFunc(noData), http.HandlerFunc(noContent), - http.HandlerFunc(notFound), http.HandlerFunc(simulateHang), - http.HandlerFunc(tooLarge), - } - for handlerIndex, handler := range validHandlers { - func() { - ts := httptest.NewServer(handler) - defer ts.Close() - - host := ts.URL[7:] - isValid, err := ValidateHTTP01Challenge(host, "my-token", "my-thumbprint", &acmeConfigEntry{}) - if isValid || err == nil { - t.Fatalf("[handler=%d] expected failure validating challenge (%v / %v)", handlerIndex, isValid, err) - } - }() - } -} - -func TestAcmeValidateDNS01Challenge(t *testing.T) { - t.Parallel() - - host := "dadgarcorp.com" - resolver := dnstest.SetupResolver(t, host) - defer resolver.Cleanup() - - t.Logf("DNS Server Address: %v", resolver.GetLocalAddr()) - - config := &acmeConfigEntry{ - DNSResolver: resolver.GetLocalAddr(), - } - - for index, tc := range keyAuthorizationTestCases { - checksum := sha256.Sum256([]byte(tc.keyAuthz)) - authz := base64.RawURLEncoding.EncodeToString(checksum[:]) - resolver.AddRecord(DNSChallengePrefix+host, "TXT", authz) - 
resolver.PushConfig() - - isValid, err := ValidateDNS01Challenge(host, tc.token, tc.thumbprint, config) - if !isValid && err == nil { - t.Fatalf("[tc=%d] expected failure to give reason via err (%v / %v)", index, isValid, err) - } - - expectedValid := !tc.shouldFail - if expectedValid != isValid { - t.Fatalf("[tc=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, isValid, err, expectedValid, tc.shouldFail) - } - - resolver.RemoveAllRecords() - } -} - -func TestAcmeValidateTLSALPN01Challenge(t *testing.T) { - // This test is not parallel because we modify ALPNPort to use a custom - // non-standard port _just for testing purposes_. - host := "localhost" - config := &acmeConfigEntry{} - - returnedProtocols := []string{ALPNProtocol} - var certificates []*x509.Certificate - var privateKey crypto.PrivateKey - - tlsCfg := &tls.Config{} - tlsCfg.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { - var retCfg tls.Config = *tlsCfg - retCfg.NextProtos = returnedProtocols - t.Logf("[alpn-server] returned protocol: %v", returnedProtocols) - return &retCfg, nil - } - tlsCfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) { - var ret tls.Certificate - for index, cert := range certificates { - ret.Certificate = append(ret.Certificate, cert.Raw) - if index == 0 { - ret.Leaf = cert - } - } - ret.PrivateKey = privateKey - t.Logf("[alpn-server] returned certificates: %v", ret) - return &ret, nil - } - - ln, err := tls.Listen("tcp", host+":0", tlsCfg) - require.NoError(t, err, "failed to listen with TLS config") - - doOneAccept := func() { - t.Logf("[alpn-server] starting accept...") - connRaw, err := ln.Accept() - require.NoError(t, err, "failed to accept TLS connection") - - t.Logf("[alpn-server] got connection...") - conn := tls.Server(connRaw.(*tls.Conn), tlsCfg) - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer func() { - t.Logf("[alpn-server] defer context cancel executing") - cancel() - 
}() - - t.Logf("[alpn-server] starting handshake...") - if err := conn.HandshakeContext(ctx); err != nil { - t.Logf("[alpn-server] got non-fatal error while handshaking connection: %v", err) - } - - t.Logf("[alpn-server] closing connection...") - if err := conn.Close(); err != nil { - t.Logf("[alpn-server] got non-fatal error while closing connection: %v", err) - } - } - - ALPNPort = strings.Split(ln.Addr().String(), ":")[1] - - type alpnTestCase struct { - name string - certificates []*x509.Certificate - privateKey crypto.PrivateKey - protocols []string - token string - thumbprint string - shouldFail bool - } - - var alpnTestCases []alpnTestCase - // Add all of our keyAuthorizationTestCases into alpnTestCases - for index, tc := range keyAuthorizationTestCases { - t.Logf("using keyAuthorizationTestCase [tc=%d] as alpnTestCase [tc=%d]...", index, len(alpnTestCases)) - // Properly encode the authorization. - checksum := sha256.Sum256([]byte(tc.keyAuthz)) - authz := base64.RawURLEncoding.EncodeToString(checksum[:]) - - // Build a self-signed certificate. 
- key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generating private key") - tmpl := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: host, - }, - Issuer: pkix.Name{ - CommonName: host, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - PublicKey: key.Public(), - SerialNumber: big.NewInt(1), - DNSNames: []string{host}, - ExtraExtensions: []pkix.Extension{ - { - Id: OIDACMEIdentifier, - Critical: true, - Value: []byte(authz), - }, - }, - BasicConstraintsValid: true, - IsCA: true, - } - certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) - require.NoError(t, err, "failed to create certificate") - cert, err := x509.ParseCertificate(certBytes) - require.NoError(t, err, "failed to parse newly generated certificate") - - newTc := alpnTestCase{ - name: fmt.Sprintf("keyAuthorizationTestCase[%d]", index), - certificates: []*x509.Certificate{cert}, - privateKey: key, - protocols: []string{ALPNProtocol}, - token: tc.token, - thumbprint: tc.thumbprint, - shouldFail: tc.shouldFail, - } - alpnTestCases = append(alpnTestCases, newTc) - } - - { - // Test case: Longer chain - // Build a self-signed certificate. 
- rootKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generating root private key") - tmpl := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "Root CA", - }, - Issuer: pkix.Name{ - CommonName: "Root CA", - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - PublicKey: rootKey.Public(), - SerialNumber: big.NewInt(1), - BasicConstraintsValid: true, - IsCA: true, - } - rootCertBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootKey.Public(), rootKey) - require.NoError(t, err, "failed to create root certificate") - rootCert, err := x509.ParseCertificate(rootCertBytes) - require.NoError(t, err, "failed to parse newly generated root certificate") - - // Compute our authorization. - checksum := sha256.Sum256([]byte("valid.valid")) - authz := base64.RawURLEncoding.EncodeToString(checksum[:]) - - // Build a leaf certificate which _could_ pass validation - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generating leaf private key") - tmpl = &x509.Certificate{ - Subject: pkix.Name{ - CommonName: host, - }, - Issuer: pkix.Name{ - CommonName: "Root CA", - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - PublicKey: key.Public(), - SerialNumber: big.NewInt(2), - DNSNames: []string{host}, - ExtraExtensions: []pkix.Extension{ - { - Id: OIDACMEIdentifier, - Critical: true, - Value: []byte(authz), - }, - }, - BasicConstraintsValid: true, - IsCA: true, - } - certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, rootCert, key.Public(), rootKey) - require.NoError(t, err, "failed to create leaf certificate") - cert, err := x509.ParseCertificate(certBytes) - require.NoError(t, err, "failed to parse newly generated leaf certificate") - - newTc := alpnTestCase{ - name: "longer chain with valid leaf", - certificates: 
[]*x509.Certificate{cert, rootCert}, - privateKey: key, - protocols: []string{ALPNProtocol}, - token: "valid", - thumbprint: "valid", - shouldFail: true, - } - alpnTestCases = append(alpnTestCases, newTc) - } - - { - // Test case: cert without DNSSan - // Compute our authorization. - checksum := sha256.Sum256([]byte("valid.valid")) - authz := base64.RawURLEncoding.EncodeToString(checksum[:]) - - // Build a leaf certificate without a DNSSan - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generating leaf private key") - tmpl := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: host, - }, - Issuer: pkix.Name{ - CommonName: host, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - PublicKey: key.Public(), - SerialNumber: big.NewInt(2), - // NO DNSNames - ExtraExtensions: []pkix.Extension{ - { - Id: OIDACMEIdentifier, - Critical: true, - Value: []byte(authz), - }, - }, - BasicConstraintsValid: true, - IsCA: true, - } - certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) - require.NoError(t, err, "failed to create leaf certificate") - cert, err := x509.ParseCertificate(certBytes) - require.NoError(t, err, "failed to parse newly generated leaf certificate") - - newTc := alpnTestCase{ - name: "valid keyauthz without valid dnsname", - certificates: []*x509.Certificate{cert}, - privateKey: key, - protocols: []string{ALPNProtocol}, - token: "valid", - thumbprint: "valid", - shouldFail: true, - } - alpnTestCases = append(alpnTestCases, newTc) - } - - { - // Test case: cert without matching DNSSan - // Compute our authorization. 
- checksum := sha256.Sum256([]byte("valid.valid")) - authz := base64.RawURLEncoding.EncodeToString(checksum[:]) - - // Build a leaf certificate which fails validation due to bad DNSName - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generating leaf private key") - tmpl := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: host, - }, - Issuer: pkix.Name{ - CommonName: host, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - PublicKey: key.Public(), - SerialNumber: big.NewInt(2), - DNSNames: []string{host + ".dadgarcorp.com" /* not matching host! */}, - ExtraExtensions: []pkix.Extension{ - { - Id: OIDACMEIdentifier, - Critical: true, - Value: []byte(authz), - }, - }, - BasicConstraintsValid: true, - IsCA: true, - } - certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) - require.NoError(t, err, "failed to create leaf certificate") - cert, err := x509.ParseCertificate(certBytes) - require.NoError(t, err, "failed to parse newly generated leaf certificate") - - newTc := alpnTestCase{ - name: "valid keyauthz without matching dnsname", - certificates: []*x509.Certificate{cert}, - privateKey: key, - protocols: []string{ALPNProtocol}, - token: "valid", - thumbprint: "valid", - shouldFail: true, - } - alpnTestCases = append(alpnTestCases, newTc) - } - - { - // Test case: cert with additional SAN - // Compute our authorization. 
- checksum := sha256.Sum256([]byte("valid.valid")) - authz := base64.RawURLEncoding.EncodeToString(checksum[:]) - - // Build a leaf certificate which has an invalid additional SAN - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generating leaf private key") - tmpl := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: host, - }, - Issuer: pkix.Name{ - CommonName: host, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - PublicKey: key.Public(), - SerialNumber: big.NewInt(2), - DNSNames: []string{host}, - EmailAddresses: []string{"webmaster@" + host}, /* unexpected */ - ExtraExtensions: []pkix.Extension{ - { - Id: OIDACMEIdentifier, - Critical: true, - Value: []byte(authz), - }, - }, - BasicConstraintsValid: true, - IsCA: true, - } - certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) - require.NoError(t, err, "failed to create leaf certificate") - cert, err := x509.ParseCertificate(certBytes) - require.NoError(t, err, "failed to parse newly generated leaf certificate") - - newTc := alpnTestCase{ - name: "valid keyauthz with additional email SANs", - certificates: []*x509.Certificate{cert}, - privateKey: key, - protocols: []string{ALPNProtocol}, - token: "valid", - thumbprint: "valid", - shouldFail: true, - } - alpnTestCases = append(alpnTestCases, newTc) - } - - { - // Test case: cert without CN - // Compute our authorization. 
- checksum := sha256.Sum256([]byte("valid.valid")) - authz := base64.RawURLEncoding.EncodeToString(checksum[:]) - - // Build a leaf certificate which should pass validation - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generating leaf private key") - tmpl := &x509.Certificate{ - Subject: pkix.Name{}, - Issuer: pkix.Name{}, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - PublicKey: key.Public(), - SerialNumber: big.NewInt(2), - DNSNames: []string{host}, - ExtraExtensions: []pkix.Extension{ - { - Id: OIDACMEIdentifier, - Critical: true, - Value: []byte(authz), - }, - }, - BasicConstraintsValid: true, - IsCA: true, - } - certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) - require.NoError(t, err, "failed to create leaf certificate") - cert, err := x509.ParseCertificate(certBytes) - require.NoError(t, err, "failed to parse newly generated leaf certificate") - - newTc := alpnTestCase{ - name: "valid certificate; no Subject/Issuer (missing CN)", - certificates: []*x509.Certificate{cert}, - privateKey: key, - protocols: []string{ALPNProtocol}, - token: "valid", - thumbprint: "valid", - shouldFail: false, - } - alpnTestCases = append(alpnTestCases, newTc) - } - - { - // Test case: cert without the extension - // Build a leaf certificate which should fail validation - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generating leaf private key") - tmpl := &x509.Certificate{ - Subject: pkix.Name{}, - Issuer: pkix.Name{}, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - PublicKey: key.Public(), - SerialNumber: big.NewInt(1), - DNSNames: []string{host}, - BasicConstraintsValid: true, - IsCA: true, - } - certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) - 
require.NoError(t, err, "failed to create leaf certificate") - cert, err := x509.ParseCertificate(certBytes) - require.NoError(t, err, "failed to parse newly generated leaf certificate") - - newTc := alpnTestCase{ - name: "missing required acmeIdentifier extension", - certificates: []*x509.Certificate{cert}, - privateKey: key, - protocols: []string{ALPNProtocol}, - token: "valid", - thumbprint: "valid", - shouldFail: true, - } - alpnTestCases = append(alpnTestCases, newTc) - } - - { - // Test case: root without a leaf - // Build a self-signed certificate. - rootKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generating root private key") - tmpl := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "Root CA", - }, - Issuer: pkix.Name{ - CommonName: "Root CA", - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - PublicKey: rootKey.Public(), - SerialNumber: big.NewInt(1), - BasicConstraintsValid: true, - IsCA: true, - } - rootCertBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootKey.Public(), rootKey) - require.NoError(t, err, "failed to create root certificate") - rootCert, err := x509.ParseCertificate(rootCertBytes) - require.NoError(t, err, "failed to parse newly generated root certificate") - - newTc := alpnTestCase{ - name: "root without leaf", - certificates: []*x509.Certificate{rootCert}, - privateKey: rootKey, - protocols: []string{ALPNProtocol}, - token: "valid", - thumbprint: "valid", - shouldFail: true, - } - alpnTestCases = append(alpnTestCases, newTc) - } - - for index, tc := range alpnTestCases { - t.Logf("\n\n[tc=%d/name=%s] starting validation", index, tc.name) - certificates = tc.certificates - privateKey = tc.privateKey - returnedProtocols = tc.protocols - - // Attempt to validate the challenge. 
- go doOneAccept() - isValid, err := ValidateTLSALPN01Challenge(host, tc.token, tc.thumbprint, config) - if !isValid && err == nil { - t.Fatalf("[tc=%d/name=%s] expected failure to give reason via err (%v / %v)", index, tc.name, isValid, err) - } - - expectedValid := !tc.shouldFail - if expectedValid != isValid { - t.Fatalf("[tc=%d/name=%s] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, tc.name, isValid, err, expectedValid, tc.shouldFail) - } else if err != nil { - t.Logf("[tc=%d/name=%s] got expected failure: err=%v", index, tc.name, err) - } - } -} diff --git a/builtin/logical/pki/acme_eab_policy.go b/builtin/logical/pki/acme_eab_policy.go deleted file mode 100644 index 9a96f3af269e5..0000000000000 --- a/builtin/logical/pki/acme_eab_policy.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "fmt" - "strings" -) - -type EabPolicyName string - -const ( - eabPolicyNotRequired EabPolicyName = "not-required" - eabPolicyNewAccountRequired EabPolicyName = "new-account-required" - eabPolicyAlwaysRequired EabPolicyName = "always-required" -) - -func getEabPolicyByString(name string) (EabPolicy, error) { - lcName := strings.TrimSpace(strings.ToLower(name)) - switch lcName { - case string(eabPolicyNotRequired): - return getEabPolicyByName(eabPolicyNotRequired), nil - case string(eabPolicyNewAccountRequired): - return getEabPolicyByName(eabPolicyNewAccountRequired), nil - case string(eabPolicyAlwaysRequired): - return getEabPolicyByName(eabPolicyAlwaysRequired), nil - default: - return getEabPolicyByName(eabPolicyAlwaysRequired), fmt.Errorf("unknown eab policy name: %s", name) - } -} - -func getEabPolicyByName(name EabPolicyName) EabPolicy { - return EabPolicy{Name: name} -} - -type EabPolicy struct { - Name EabPolicyName -} - -// EnforceForNewAccount for new account creations, should we require an EAB. 
-func (ep EabPolicy) EnforceForNewAccount(eabData *eabType) error { - if (ep.Name == eabPolicyAlwaysRequired || ep.Name == eabPolicyNewAccountRequired) && eabData == nil { - return ErrExternalAccountRequired - } - - return nil -} - -// EnforceForExistingAccount for all operations within ACME, does the account being used require an EAB attached to it. -func (ep EabPolicy) EnforceForExistingAccount(account *acmeAccount) error { - if ep.Name == eabPolicyAlwaysRequired && account.Eab == nil { - return ErrExternalAccountRequired - } - - return nil -} - -// IsExternalAccountRequired for new accounts incoming does is an EAB required -func (ep EabPolicy) IsExternalAccountRequired() bool { - return ep.Name == eabPolicyAlwaysRequired || ep.Name == eabPolicyNewAccountRequired -} - -// OverrideEnvDisablingPublicAcme determines if ACME is enabled but the OS environment variable -// has said to disable public acme support, if we can override that environment variable to -// turn on ACME support -func (ep EabPolicy) OverrideEnvDisablingPublicAcme() bool { - return ep.Name == eabPolicyAlwaysRequired -} diff --git a/builtin/logical/pki/acme_errors.go b/builtin/logical/pki/acme_errors.go deleted file mode 100644 index 5b73d9d215e0b..0000000000000 --- a/builtin/logical/pki/acme_errors.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/sdk/logical" -) - -// Error prefix; see RFC 8555 Section 6.7. Errors. -const ErrorPrefix = "urn:ietf:params:acme:error:" -const ErrorContentType = "application/problem+json" - -// See RFC 8555 Section 6.7. Errors. 
-var ErrAccountDoesNotExist = errors.New("The request specified an account that does not exist") - -var ErrAcmeDisabled = errors.New("ACME feature is disabled") - -var ( - ErrAlreadyRevoked = errors.New("The request specified a certificate to be revoked that has already been revoked") - ErrBadCSR = errors.New("The CSR is unacceptable") - ErrBadNonce = errors.New("The client sent an unacceptable anti-replay nonce") - ErrBadPublicKey = errors.New("The JWS was signed by a public key the server does not support") - ErrBadRevocationReason = errors.New("The revocation reason provided is not allowed by the server") - ErrBadSignatureAlgorithm = errors.New("The JWS was signed with an algorithm the server does not support") - ErrCAA = errors.New("Certification Authority Authorization (CAA) records forbid the CA from issuing a certificate") - ErrCompound = errors.New("Specific error conditions are indicated in the 'subproblems' array") - ErrConnection = errors.New("The server could not connect to validation target") - ErrDNS = errors.New("There was a problem with a DNS query during identifier validation") - ErrExternalAccountRequired = errors.New("The request must include a value for the 'externalAccountBinding' field") - ErrIncorrectResponse = errors.New("Response received didn't match the challenge's requirements") - ErrInvalidContact = errors.New("A contact URL for an account was invalid") - ErrMalformed = errors.New("The request message was malformed") - ErrOrderNotReady = errors.New("The request attempted to finalize an order that is not ready to be finalized") - ErrRateLimited = errors.New("The request exceeds a rate limit") - ErrRejectedIdentifier = errors.New("The server will not issue certificates for the identifier") - ErrServerInternal = errors.New("The server experienced an internal error") - ErrTLS = errors.New("The server received a TLS error during validation") - ErrUnauthorized = errors.New("The client lacks sufficient authorization") - ErrUnsupportedContact = 
errors.New("A contact URL for an account used an unsupported protocol scheme") - ErrUnsupportedIdentifier = errors.New("An identifier is of an unsupported type") - ErrUserActionRequired = errors.New("Visit the 'instance' URL and take actions specified there") -) - -// Mapping of err->name; see table in RFC 8555 Section 6.7. Errors. -var errIdMappings = map[error]string{ - ErrAccountDoesNotExist: "accountDoesNotExist", - ErrAlreadyRevoked: "alreadyRevoked", - ErrBadCSR: "badCSR", - ErrBadNonce: "badNonce", - ErrBadPublicKey: "badPublicKey", - ErrBadRevocationReason: "badRevocationReason", - ErrBadSignatureAlgorithm: "badSignatureAlgorithm", - ErrCAA: "caa", - ErrCompound: "compound", - ErrConnection: "connection", - ErrDNS: "dns", - ErrExternalAccountRequired: "externalAccountRequired", - ErrIncorrectResponse: "incorrectResponse", - ErrInvalidContact: "invalidContact", - ErrMalformed: "malformed", - ErrOrderNotReady: "orderNotReady", - ErrRateLimited: "rateLimited", - ErrRejectedIdentifier: "rejectedIdentifier", - ErrServerInternal: "serverInternal", - ErrTLS: "tls", - ErrUnauthorized: "unauthorized", - ErrUnsupportedContact: "unsupportedContact", - ErrUnsupportedIdentifier: "unsupportedIdentifier", - ErrUserActionRequired: "userActionRequired", -} - -// Mapping of err->status codes; see table in RFC 8555 Section 6.7. Errors. -var errCodeMappings = map[error]int{ - ErrAccountDoesNotExist: http.StatusBadRequest, // See RFC 8555 Section 7.3.1. Finding an Account URL Given a Key. 
- ErrAlreadyRevoked: http.StatusBadRequest, - ErrBadCSR: http.StatusBadRequest, - ErrBadNonce: http.StatusBadRequest, - ErrBadPublicKey: http.StatusBadRequest, - ErrBadRevocationReason: http.StatusBadRequest, - ErrBadSignatureAlgorithm: http.StatusBadRequest, - ErrCAA: http.StatusForbidden, - ErrCompound: http.StatusBadRequest, - ErrConnection: http.StatusInternalServerError, - ErrDNS: http.StatusInternalServerError, - ErrExternalAccountRequired: http.StatusUnauthorized, - ErrIncorrectResponse: http.StatusBadRequest, - ErrInvalidContact: http.StatusBadRequest, - ErrMalformed: http.StatusBadRequest, - ErrOrderNotReady: http.StatusForbidden, // See RFC 8555 Section 7.4. Applying for Certificate Issuance. - ErrRateLimited: http.StatusTooManyRequests, - ErrRejectedIdentifier: http.StatusBadRequest, - ErrServerInternal: http.StatusInternalServerError, - ErrTLS: http.StatusInternalServerError, - ErrUnauthorized: http.StatusUnauthorized, - ErrUnsupportedContact: http.StatusBadRequest, - ErrUnsupportedIdentifier: http.StatusBadRequest, - ErrUserActionRequired: http.StatusUnauthorized, -} - -type ErrorResponse struct { - StatusCode int `json:"-"` - Type string `json:"type"` - Detail string `json:"detail"` - Subproblems []*ErrorResponse `json:"subproblems"` -} - -func (e *ErrorResponse) MarshalForStorage() map[string]interface{} { - subProblems := []map[string]interface{}{} - for _, subProblem := range e.Subproblems { - subProblems = append(subProblems, subProblem.MarshalForStorage()) - } - return map[string]interface{}{ - "status": e.StatusCode, - "type": e.Type, - "detail": e.Detail, - "subproblems": subProblems, - } -} - -func (e *ErrorResponse) Marshal() (*logical.Response, error) { - body, err := json.Marshal(e) - if err != nil { - return nil, fmt.Errorf("failed marshalling of error response: %w", err) - } - - var resp logical.Response - resp.Data = map[string]interface{}{ - logical.HTTPContentType: ErrorContentType, - logical.HTTPRawBody: body, - 
logical.HTTPStatusCode: e.StatusCode, - } - - return &resp, nil -} - -func FindType(given error) (err error, id string, code int, found bool) { - matchedError := false - for err, id = range errIdMappings { - if errors.Is(given, err) { - matchedError = true - break - } - } - - // If the given error was not matched from one of the standard ACME errors - // make this error, force ErrServerInternal - if !matchedError { - err = ErrServerInternal - id = errIdMappings[err] - } - - code = errCodeMappings[err] - - return -} - -func TranslateError(given error) (*logical.Response, error) { - if errors.Is(given, logical.ErrReadOnly) { - return nil, given - } - - if errors.Is(given, ErrAcmeDisabled) { - return logical.RespondWithStatusCode(nil, nil, http.StatusNotFound) - } - - body := TranslateErrorToErrorResponse(given) - - return body.Marshal() -} - -func TranslateErrorToErrorResponse(given error) ErrorResponse { - // We're multierror aware here: if we're given a list of errors, assume - // they're structured so the first error is the outer error and the inner - // subproblems are subsequent in the multierror. - var remaining []error - if unwrapped, ok := given.(*multierror.Error); ok { - remaining = unwrapped.Errors[1:] - given = unwrapped.Errors[0] - } - - _, id, code, found := FindType(given) - if !found && len(remaining) > 0 { - // Translate multierrors into a generic error code. 
- id = errIdMappings[ErrCompound] - code = errCodeMappings[ErrCompound] - } - - var body ErrorResponse - body.Type = ErrorPrefix + id - body.Detail = given.Error() - body.StatusCode = code - - for _, subgiven := range remaining { - _, subid, _, _ := FindType(subgiven) - - var sub ErrorResponse - sub.Type = ErrorPrefix + subid - body.Detail = subgiven.Error() - - body.Subproblems = append(body.Subproblems, &sub) - } - return body -} diff --git a/builtin/logical/pki/acme_jws.go b/builtin/logical/pki/acme_jws.go deleted file mode 100644 index 3f6ba6d27187c..0000000000000 --- a/builtin/logical/pki/acme_jws.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "bytes" - "crypto" - "encoding/base64" - "encoding/json" - "fmt" - "strings" - - "github.com/go-jose/go-jose/v3" -) - -var AllowedOuterJWSTypes = map[string]interface{}{ - "RS256": true, - "RS384": true, - "RS512": true, - "PS256": true, - "PS384": true, - "PS512": true, - "ES256": true, - "ES384": true, - "ES512": true, - "EdDSA2": true, -} - -var AllowedEabJWSTypes = map[string]interface{}{ - "HS256": true, - "HS384": true, - "HS512": true, -} - -// This wraps a JWS message structure. 
-type jwsCtx struct { - Algo string `json:"alg"` - Kid string `json:"kid"` - Jwk json.RawMessage `json:"jwk"` - Nonce string `json:"nonce"` - Url string `json:"url"` - Key jose.JSONWebKey `json:"-"` - Existing bool `json:"-"` -} - -func (c *jwsCtx) GetKeyThumbprint() (string, error) { - keyThumbprint, err := c.Key.Thumbprint(crypto.SHA256) - if err != nil { - return "", fmt.Errorf("failed creating thumbprint: %w", err) - } - return base64.RawURLEncoding.EncodeToString(keyThumbprint), nil -} - -func UnmarshalEabJwsJson(eabBytes []byte) (*jwsCtx, error) { - var eabJws jwsCtx - var err error - if err = json.Unmarshal(eabBytes, &eabJws); err != nil { - return nil, err - } - - if eabJws.Kid == "" { - return nil, fmt.Errorf("invalid header: got missing required field 'kid': %w", ErrMalformed) - } - - if _, present := AllowedEabJWSTypes[eabJws.Algo]; !present { - return nil, fmt.Errorf("invalid header: unexpected value for 'algo': %w", ErrMalformed) - } - - return &eabJws, nil -} - -func (c *jwsCtx) UnmarshalOuterJwsJson(a *acmeState, ac *acmeContext, jws []byte) error { - var err error - if err = json.Unmarshal(jws, c); err != nil { - return err - } - - if c.Kid != "" && len(c.Jwk) > 0 { - // See RFC 8555 Section 6.2. Request Authentication: - // - // > The "jwk" and "kid" fields are mutually exclusive. Servers MUST - // > reject requests that contain both. - return fmt.Errorf("invalid header: got both account 'kid' and 'jwk' in the same message; expected only one: %w", ErrMalformed) - } - - if c.Kid == "" && len(c.Jwk) == 0 { - // See RFC 8555 Section 6.2. Request Authentication: - // - // > Either "jwk" (JSON Web Key) or "kid" (Key ID) as specified - // > below - return fmt.Errorf("invalid header: got neither required fields of 'kid' nor 'jwk': %w", ErrMalformed) - } - - if _, present := AllowedOuterJWSTypes[c.Algo]; !present { - // See RFC 8555 Section 6.2. 
Request Authentication: - // - // > The JWS Protected Header MUST include the following fields: - // > - // > - "alg" (Algorithm) - // > - // > * This field MUST NOT contain "none" or a Message - // > Authentication Code (MAC) algorithm (e.g. one in which the - // > algorithm registry description mentions MAC/HMAC). - return fmt.Errorf("invalid header: unexpected value for 'algo': %w", ErrMalformed) - } - - if c.Kid != "" { - // Load KID from storage first. - kid := getKeyIdFromAccountUrl(c.Kid) - c.Jwk, err = a.LoadJWK(ac, kid) - if err != nil { - return err - } - c.Kid = kid // Use the uuid itself, not the full account url that was originally provided to us. - c.Existing = true - } - - if err = c.Key.UnmarshalJSON(c.Jwk); err != nil { - return err - } - - if !c.Key.Valid() { - return fmt.Errorf("received invalid jwk: %w", ErrMalformed) - } - - if c.Kid == "" { - c.Kid = genUuid() - c.Existing = false - } - - return nil -} - -func getKeyIdFromAccountUrl(accountUrl string) string { - pieces := strings.Split(accountUrl, "/") - return pieces[len(pieces)-1] -} - -func hasValues(h jose.Header) bool { - return h.KeyID != "" || h.JSONWebKey != nil || h.Algorithm != "" || h.Nonce != "" || len(h.ExtraHeaders) > 0 -} - -func (c *jwsCtx) VerifyJWS(signature string) (map[string]interface{}, error) { - // See RFC 8555 Section 6.2. Request Authentication: - // - // > The JWS Unencoded Payload Option [RFC7797] MUST NOT be used - // - // This is validated by go-jose. - sig, err := jose.ParseSigned(signature) - if err != nil { - return nil, fmt.Errorf("error parsing signature: %s: %w", err, ErrMalformed) - } - - if len(sig.Signatures) > 1 { - // See RFC 8555 Section 6.2. Request Authentication: - // - // > The JWS MUST NOT have multiple signatures - return nil, fmt.Errorf("request had multiple signatures: %w", ErrMalformed) - } - - if hasValues(sig.Signatures[0].Unprotected) { - // See RFC 8555 Section 6.2. 
Request Authentication: - // - // > The JWS Unprotected Header [RFC7515] MUST NOT be used - return nil, fmt.Errorf("request had unprotected headers: %w", ErrMalformed) - } - - payload, err := sig.Verify(c.Key) - if err != nil { - return nil, err - } - - if len(payload) == 0 { - // Distinguish POST-AS-GET from POST-with-an-empty-body. - return nil, nil - } - - var m map[string]interface{} - if err := json.Unmarshal(payload, &m); err != nil { - return nil, fmt.Errorf("failed to json unmarshal 'payload': %s: %w", err, ErrMalformed) - } - - return m, nil -} - -func verifyEabPayload(acmeState *acmeState, ac *acmeContext, outer *jwsCtx, expectedPath string, payload map[string]interface{}) (*eabType, error) { - // Parse the key out. - rawProtectedBase64, ok := payload["protected"] - if !ok { - return nil, fmt.Errorf("missing required field 'protected': %w", ErrMalformed) - } - jwkBase64 := rawProtectedBase64.(string) - - jwkBytes, err := base64.RawURLEncoding.DecodeString(jwkBase64) - if err != nil { - return nil, fmt.Errorf("failed to base64 parse eab 'protected': %s: %w", err, ErrMalformed) - } - - eabJws, err := UnmarshalEabJwsJson(jwkBytes) - if err != nil { - return nil, fmt.Errorf("failed to json unmarshal eab 'protected': %w", err) - } - - if len(eabJws.Url) == 0 { - return nil, fmt.Errorf("missing required parameter 'url' in eab 'protected': %w", ErrMalformed) - } - expectedUrl := ac.clusterUrl.JoinPath(expectedPath).String() - if expectedUrl != eabJws.Url { - return nil, fmt.Errorf("invalid value for 'url' in eab 'protected': got '%v' expected '%v': %w", eabJws.Url, expectedUrl, ErrUnauthorized) - } - - if len(eabJws.Nonce) != 0 { - return nil, fmt.Errorf("nonce should not be provided in eab 'protected': %w", ErrMalformed) - } - - rawPayloadBase64, ok := payload["payload"] - if !ok { - return nil, fmt.Errorf("missing required field eab 'payload': %w", ErrMalformed) - } - payloadBase64, ok := rawPayloadBase64.(string) - if !ok { - return nil, fmt.Errorf("failed to 
parse 'payload' field: %w", ErrMalformed) - } - - rawSignatureBase64, ok := payload["signature"] - if !ok { - return nil, fmt.Errorf("missing required field 'signature': %w", ErrMalformed) - } - signatureBase64, ok := rawSignatureBase64.(string) - if !ok { - return nil, fmt.Errorf("failed to parse 'signature' field: %w", ErrMalformed) - } - - // go-jose only seems to support compact signature encodings. - compactSig := fmt.Sprintf("%v.%v.%v", jwkBase64, payloadBase64, signatureBase64) - sig, err := jose.ParseSigned(compactSig) - if err != nil { - return nil, fmt.Errorf("error parsing eab signature: %s: %w", err, ErrMalformed) - } - - if len(sig.Signatures) > 1 { - // See RFC 8555 Section 6.2. Request Authentication: - // - // > The JWS MUST NOT have multiple signatures - return nil, fmt.Errorf("eab had multiple signatures: %w", ErrMalformed) - } - - if hasValues(sig.Signatures[0].Unprotected) { - // See RFC 8555 Section 6.2. Request Authentication: - // - // > The JWS Unprotected Header [RFC7515] MUST NOT be used - return nil, fmt.Errorf("eab had unprotected headers: %w", ErrMalformed) - } - - // Load the EAB to validate the signature against - eabEntry, err := acmeState.LoadEab(ac.sc, eabJws.Kid) - if err != nil { - return nil, fmt.Errorf("%w: failed to verify eab", ErrUnauthorized) - } - - verifiedPayload, err := sig.Verify(eabEntry.PrivateBytes) - if err != nil { - return nil, err - } - - // Make sure how eab payload matches the outer JWK key value - if !bytes.Equal(outer.Jwk, verifiedPayload) { - return nil, fmt.Errorf("eab payload does not match outer JWK key: %w", ErrMalformed) - } - - if eabEntry.AcmeDirectory != ac.acmeDirectory { - // This EAB was not created for this specific ACME directory, reject it - return nil, fmt.Errorf("%w: failed to verify eab", ErrUnauthorized) - } - - return eabEntry, nil -} diff --git a/builtin/logical/pki/acme_state.go b/builtin/logical/pki/acme_state.go deleted file mode 100644 index c026de3d5cb5f..0000000000000 --- 
a/builtin/logical/pki/acme_state.go +++ /dev/null @@ -1,659 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "crypto/rand" - "encoding/base64" - "errors" - "fmt" - "io" - "net" - "path" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/hashicorp/go-secure-stdlib/nonceutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - // How many bytes are in a token. Per RFC 8555 Section - // 8.3. HTTP Challenge and Section 11.3 Token Entropy: - // - // > token (required, string): A random value that uniquely identifies - // > the challenge. This value MUST have at least 128 bits of entropy. - tokenBytes = 128 / 8 - - // Path Prefixes - acmePathPrefix = "acme/" - acmeAccountPrefix = acmePathPrefix + "accounts/" - acmeThumbprintPrefix = acmePathPrefix + "account-thumbprints/" - acmeValidationPrefix = acmePathPrefix + "validations/" - acmeEabPrefix = acmePathPrefix + "eab/" -) - -type acmeState struct { - nonces nonceutil.NonceService - - validator *ACMEChallengeEngine - - configDirty *atomic.Bool - _config sync.RWMutex - config acmeConfigEntry -} - -type acmeThumbprint struct { - Kid string `json:"kid"` - Thumbprint string `json:"-"` -} - -func NewACMEState() *acmeState { - state := &acmeState{ - nonces: nonceutil.NewNonceService(), - validator: NewACMEChallengeEngine(), - configDirty: new(atomic.Bool), - } - // Config hasn't been loaded yet; mark dirty. - state.configDirty.Store(true) - - return state -} - -func (a *acmeState) Initialize(b *backend, sc *storageContext) error { - // Initialize the nonce service. - if err := a.nonces.Initialize(); err != nil { - return fmt.Errorf("failed to initialize the ACME nonce service: %w", err) - } - - // Load the ACME config. - _, err := a.getConfigWithUpdate(sc) - if err != nil { - return fmt.Errorf("error initializing ACME engine: %w", err) - } - - // Kick off our ACME challenge validation engine. 
- go a.validator.Run(b, a, sc) - - // All good. - return nil -} - -func (a *acmeState) markConfigDirty() { - a.configDirty.Store(true) -} - -func (a *acmeState) reloadConfigIfRequired(sc *storageContext) error { - if !a.configDirty.Load() { - return nil - } - - a._config.Lock() - defer a._config.Unlock() - - if !a.configDirty.Load() { - // Someone beat us to grabbing the above write lock and already - // updated the config. - return nil - } - - config, err := sc.getAcmeConfig() - if err != nil { - return fmt.Errorf("failed reading config: %w", err) - } - - a.config = *config - a.configDirty.Store(false) - - return nil -} - -func (a *acmeState) getConfigWithUpdate(sc *storageContext) (*acmeConfigEntry, error) { - if err := a.reloadConfigIfRequired(sc); err != nil { - return nil, err - } - - a._config.RLock() - defer a._config.RUnlock() - - configCopy := a.config - return &configCopy, nil -} - -func generateRandomBase64(srcBytes int) (string, error) { - data := make([]byte, 21) - if _, err := io.ReadFull(rand.Reader, data); err != nil { - return "", err - } - - return base64.RawURLEncoding.EncodeToString(data), nil -} - -func (a *acmeState) GetNonce() (string, time.Time, error) { - return a.nonces.Get() -} - -func (a *acmeState) RedeemNonce(nonce string) bool { - return a.nonces.Redeem(nonce) -} - -func (a *acmeState) DoTidyNonces() { - a.nonces.Tidy() -} - -type ACMEAccountStatus string - -func (aas ACMEAccountStatus) String() string { - return string(aas) -} - -const ( - AccountStatusValid ACMEAccountStatus = "valid" - AccountStatusDeactivated ACMEAccountStatus = "deactivated" - AccountStatusRevoked ACMEAccountStatus = "revoked" -) - -type acmeAccount struct { - KeyId string `json:"-"` - Status ACMEAccountStatus `json:"status"` - Contact []string `json:"contact"` - TermsOfServiceAgreed bool `json:"terms-of-service-agreed"` - Jwk []byte `json:"jwk"` - AcmeDirectory string `json:"acme-directory"` - AccountCreatedDate time.Time `json:"account-created-date"` - 
MaxCertExpiry time.Time `json:"account-max-cert-expiry"` - AccountRevokedDate time.Time `json:"account-revoked-date"` - Eab *eabType `json:"eab"` -} - -type acmeOrder struct { - OrderId string `json:"-"` - AccountId string `json:"account-id"` - Status ACMEOrderStatusType `json:"status"` - Expires time.Time `json:"expires"` - Identifiers []*ACMEIdentifier `json:"identifiers"` - AuthorizationIds []string `json:"authorization-ids"` - CertificateSerialNumber string `json:"cert-serial-number"` - CertificateExpiry time.Time `json:"cert-expiry"` - // The actual issuer UUID that issued the certificate, blank if an order exists but no certificate was issued. - IssuerId issuerID `json:"issuer-id"` -} - -func (o acmeOrder) getIdentifierDNSValues() []string { - var identifiers []string - for _, value := range o.Identifiers { - if value.Type == ACMEDNSIdentifier { - // Here, because of wildcard processing, we need to use the - // original value provided by the caller rather than the - // post-modification (trimmed '*.' prefix) value. - identifiers = append(identifiers, value.OriginalValue) - } - } - return identifiers -} - -func (o acmeOrder) getIdentifierIPValues() []net.IP { - var identifiers []net.IP - for _, value := range o.Identifiers { - if value.Type == ACMEIPIdentifier { - identifiers = append(identifiers, net.ParseIP(value.Value)) - } - } - return identifiers -} - -func (a *acmeState) CreateAccount(ac *acmeContext, c *jwsCtx, contact []string, termsOfServiceAgreed bool, eab *eabType) (*acmeAccount, error) { - // Write out the thumbprint value/entry out first, if we get an error mid-way through - // this is easier to recover from. The new kid with the same existing public key - // will rewrite the thumbprint entry. This goes in hand with LoadAccountByKey that - // will return a nil, nil value if the referenced kid in a loaded thumbprint does not - // exist. 
This effectively makes this self-healing IF the end-user re-attempts the - // account creation with the same public key. - thumbprint, err := c.GetKeyThumbprint() - if err != nil { - return nil, fmt.Errorf("failed generating thumbprint: %w", err) - } - - thumbPrint := &acmeThumbprint{ - Kid: c.Kid, - Thumbprint: thumbprint, - } - thumbPrintEntry, err := logical.StorageEntryJSON(acmeThumbprintPrefix+thumbprint, thumbPrint) - if err != nil { - return nil, fmt.Errorf("error generating account thumbprint entry: %w", err) - } - - if err = ac.sc.Storage.Put(ac.sc.Context, thumbPrintEntry); err != nil { - return nil, fmt.Errorf("error writing account thumbprint entry: %w", err) - } - - // Now write out the main value that the thumbprint points too. - acct := &acmeAccount{ - KeyId: c.Kid, - Contact: contact, - TermsOfServiceAgreed: termsOfServiceAgreed, - Jwk: c.Jwk, - Status: AccountStatusValid, - AcmeDirectory: ac.acmeDirectory, - AccountCreatedDate: time.Now(), - Eab: eab, - } - json, err := logical.StorageEntryJSON(acmeAccountPrefix+c.Kid, acct) - if err != nil { - return nil, fmt.Errorf("error creating account entry: %w", err) - } - - if err := ac.sc.Storage.Put(ac.sc.Context, json); err != nil { - return nil, fmt.Errorf("error writing account entry: %w", err) - } - - return acct, nil -} - -func (a *acmeState) UpdateAccount(ac *acmeContext, acct *acmeAccount) error { - json, err := logical.StorageEntryJSON(acmeAccountPrefix+acct.KeyId, acct) - if err != nil { - return fmt.Errorf("error creating account entry: %w", err) - } - - if err := ac.sc.Storage.Put(ac.sc.Context, json); err != nil { - return fmt.Errorf("error writing account entry: %w", err) - } - - return nil -} - -// LoadAccount will load the account object based on the passed in keyId field value -// otherwise will return an error if the account does not exist. 
-func (a *acmeState) LoadAccount(ac *acmeContext, keyId string) (*acmeAccount, error) { - entry, err := ac.sc.Storage.Get(ac.sc.Context, acmeAccountPrefix+keyId) - if err != nil { - return nil, fmt.Errorf("error loading account: %w", err) - } - if entry == nil { - return nil, fmt.Errorf("account not found: %w", ErrAccountDoesNotExist) - } - - var acct acmeAccount - err = entry.DecodeJSON(&acct) - if err != nil { - return nil, fmt.Errorf("error decoding account: %w", err) - } - - if acct.AcmeDirectory != ac.acmeDirectory { - return nil, fmt.Errorf("%w: account part of different ACME directory path", ErrMalformed) - } - - acct.KeyId = keyId - - return &acct, nil -} - -// LoadAccountByKey will attempt to load the account based on a key thumbprint. If the thumbprint -// or kid is unknown a nil, nil will be returned. -func (a *acmeState) LoadAccountByKey(ac *acmeContext, keyThumbprint string) (*acmeAccount, error) { - thumbprintEntry, err := ac.sc.Storage.Get(ac.sc.Context, acmeThumbprintPrefix+keyThumbprint) - if err != nil { - return nil, fmt.Errorf("failed loading acme thumbprintEntry for key: %w", err) - } - if thumbprintEntry == nil { - return nil, nil - } - - var thumbprint acmeThumbprint - err = thumbprintEntry.DecodeJSON(&thumbprint) - if err != nil { - return nil, fmt.Errorf("failed decoding thumbprint entry: %s: %w", keyThumbprint, err) - } - - if len(thumbprint.Kid) == 0 { - return nil, fmt.Errorf("empty kid within thumbprint entry: %s", keyThumbprint) - } - - acct, err := a.LoadAccount(ac, thumbprint.Kid) - if err != nil { - // If we fail to lookup the account that the thumbprint entry references, assume a bad - // write previously occurred in which we managed to write out the thumbprint but failed - // writing out the main account information. 
- if errors.Is(err, ErrAccountDoesNotExist) { - return nil, nil - } - return nil, err - } - - return acct, nil -} - -func (a *acmeState) LoadJWK(ac *acmeContext, keyId string) ([]byte, error) { - key, err := a.LoadAccount(ac, keyId) - if err != nil { - return nil, err - } - - if len(key.Jwk) == 0 { - return nil, fmt.Errorf("malformed key entry lacks JWK") - } - - return key.Jwk, nil -} - -func (a *acmeState) LoadAuthorization(ac *acmeContext, userCtx *jwsCtx, authId string) (*ACMEAuthorization, error) { - if authId == "" { - return nil, fmt.Errorf("malformed authorization identifier") - } - - authorizationPath := getAuthorizationPath(userCtx.Kid, authId) - - authz, err := loadAuthorizationAtPath(ac.sc, authorizationPath) - if err != nil { - return nil, err - } - - if userCtx.Kid != authz.AccountId { - return nil, ErrUnauthorized - } - - return authz, nil -} - -func loadAuthorizationAtPath(sc *storageContext, authorizationPath string) (*ACMEAuthorization, error) { - entry, err := sc.Storage.Get(sc.Context, authorizationPath) - if err != nil { - return nil, fmt.Errorf("error loading authorization: %w", err) - } - - if entry == nil { - return nil, fmt.Errorf("authorization does not exist: %w", ErrMalformed) - } - - var authz ACMEAuthorization - err = entry.DecodeJSON(&authz) - if err != nil { - return nil, fmt.Errorf("error decoding authorization: %w", err) - } - - return &authz, nil -} - -func (a *acmeState) SaveAuthorization(ac *acmeContext, authz *ACMEAuthorization) error { - path := getAuthorizationPath(authz.AccountId, authz.Id) - return saveAuthorizationAtPath(ac.sc, path, authz) -} - -func saveAuthorizationAtPath(sc *storageContext, path string, authz *ACMEAuthorization) error { - if authz.Id == "" { - return fmt.Errorf("invalid authorization, missing id") - } - - if authz.AccountId == "" { - return fmt.Errorf("invalid authorization, missing account id") - } - - json, err := logical.StorageEntryJSON(path, authz) - if err != nil { - return fmt.Errorf("error 
creating authorization entry: %w", err) - } - - if err = sc.Storage.Put(sc.Context, json); err != nil { - return fmt.Errorf("error writing authorization entry: %w", err) - } - - return nil -} - -func (a *acmeState) ParseRequestParams(ac *acmeContext, req *logical.Request, data *framework.FieldData) (*jwsCtx, map[string]interface{}, error) { - var c jwsCtx - var m map[string]interface{} - - // Parse the key out. - rawJWKBase64, ok := data.GetOk("protected") - if !ok { - return nil, nil, fmt.Errorf("missing required field 'protected': %w", ErrMalformed) - } - jwkBase64 := rawJWKBase64.(string) - - jwkBytes, err := base64.RawURLEncoding.DecodeString(jwkBase64) - if err != nil { - return nil, nil, fmt.Errorf("failed to base64 parse 'protected': %s: %w", err, ErrMalformed) - } - if err = c.UnmarshalOuterJwsJson(a, ac, jwkBytes); err != nil { - return nil, nil, fmt.Errorf("failed to json unmarshal 'protected': %w", err) - } - - // Since we already parsed the header to verify the JWS context, we - // should read and redeem the nonce here too, to avoid doing any extra - // work if it is invalid. - if !a.RedeemNonce(c.Nonce) { - return nil, nil, fmt.Errorf("invalid or reused nonce: %w", ErrBadNonce) - } - - // If the path is incorrect, reject the request. - // - // See RFC 8555 Section 6.4. Request URL Integrity: - // - // > As noted in Section 6.2, all ACME request objects carry a "url" - // > header parameter in their protected header. ... On receiving such - // > an object in an HTTP request, the server MUST compare the "url" - // > header parameter to the request URL. If the two do not match, - // > then the server MUST reject the request as unauthorized. 
- if len(c.Url) == 0 { - return nil, nil, fmt.Errorf("missing required parameter 'url' in 'protected': %w", ErrMalformed) - } - if ac.clusterUrl.JoinPath(req.Path).String() != c.Url { - return nil, nil, fmt.Errorf("invalid value for 'url' in 'protected': got '%v' expected '%v': %w", c.Url, ac.clusterUrl.JoinPath(req.Path).String(), ErrUnauthorized) - } - - rawPayloadBase64, ok := data.GetOk("payload") - if !ok { - return nil, nil, fmt.Errorf("missing required field 'payload': %w", ErrMalformed) - } - payloadBase64 := rawPayloadBase64.(string) - - rawSignatureBase64, ok := data.GetOk("signature") - if !ok { - return nil, nil, fmt.Errorf("missing required field 'signature': %w", ErrMalformed) - } - signatureBase64 := rawSignatureBase64.(string) - - // go-jose only seems to support compact signature encodings. - compactSig := fmt.Sprintf("%v.%v.%v", jwkBase64, payloadBase64, signatureBase64) - m, err = c.VerifyJWS(compactSig) - if err != nil { - return nil, nil, fmt.Errorf("failed to verify signature: %w", err) - } - - return &c, m, nil -} - -func (a *acmeState) LoadOrder(ac *acmeContext, userCtx *jwsCtx, orderId string) (*acmeOrder, error) { - path := getOrderPath(userCtx.Kid, orderId) - entry, err := ac.sc.Storage.Get(ac.sc.Context, path) - if err != nil { - return nil, fmt.Errorf("error loading order: %w", err) - } - - if entry == nil { - return nil, fmt.Errorf("order does not exist: %w", ErrMalformed) - } - - var order acmeOrder - err = entry.DecodeJSON(&order) - if err != nil { - return nil, fmt.Errorf("error decoding order: %w", err) - } - - if userCtx.Kid != order.AccountId { - return nil, ErrUnauthorized - } - - order.OrderId = orderId - - return &order, nil -} - -func (a *acmeState) SaveOrder(ac *acmeContext, order *acmeOrder) error { - if order.OrderId == "" { - return fmt.Errorf("invalid order, missing order id") - } - - if order.AccountId == "" { - return fmt.Errorf("invalid order, missing account id") - } - path := getOrderPath(order.AccountId, 
order.OrderId) - json, err := logical.StorageEntryJSON(path, order) - if err != nil { - return fmt.Errorf("error serializing order entry: %w", err) - } - - if err = ac.sc.Storage.Put(ac.sc.Context, json); err != nil { - return fmt.Errorf("error writing order entry: %w", err) - } - - return nil -} - -func (a *acmeState) ListOrderIds(ac *acmeContext, accountId string) ([]string, error) { - accountOrderPrefixPath := acmeAccountPrefix + accountId + "/orders/" - - rawOrderIds, err := ac.sc.Storage.List(ac.sc.Context, accountOrderPrefixPath) - if err != nil { - return nil, fmt.Errorf("failed listing order ids for account %s: %w", accountId, err) - } - - orderIds := []string{} - for _, order := range rawOrderIds { - if strings.HasSuffix(order, "/") { - // skip any folders we might have for some reason - continue - } - orderIds = append(orderIds, order) - } - return orderIds, nil -} - -type acmeCertEntry struct { - Serial string `json:"-"` - Account string `json:"-"` - Order string `json:"order"` -} - -func (a *acmeState) TrackIssuedCert(ac *acmeContext, accountId string, serial string, orderId string) error { - path := getAcmeSerialToAccountTrackerPath(accountId, serial) - entry := acmeCertEntry{ - Order: orderId, - } - - json, err := logical.StorageEntryJSON(path, &entry) - if err != nil { - return fmt.Errorf("error serializing acme cert entry: %w", err) - } - - if err = ac.sc.Storage.Put(ac.sc.Context, json); err != nil { - return fmt.Errorf("error writing acme cert entry: %w", err) - } - - return nil -} - -func (a *acmeState) GetIssuedCert(ac *acmeContext, accountId string, serial string) (*acmeCertEntry, error) { - path := acmeAccountPrefix + accountId + "/certs/" + normalizeSerial(serial) - - entry, err := ac.sc.Storage.Get(ac.sc.Context, path) - if err != nil { - return nil, fmt.Errorf("error loading acme cert entry: %w", err) - } - - if entry == nil { - return nil, fmt.Errorf("no certificate with this serial was issued for this account") - } - - var cert 
acmeCertEntry - err = entry.DecodeJSON(&cert) - if err != nil { - return nil, fmt.Errorf("error decoding acme cert entry: %w", err) - } - - cert.Serial = denormalizeSerial(serial) - cert.Account = accountId - - return &cert, nil -} - -func (a *acmeState) SaveEab(sc *storageContext, eab *eabType) error { - json, err := logical.StorageEntryJSON(path.Join(acmeEabPrefix, eab.KeyID), eab) - if err != nil { - return err - } - return sc.Storage.Put(sc.Context, json) -} - -func (a *acmeState) LoadEab(sc *storageContext, eabKid string) (*eabType, error) { - rawEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeEabPrefix, eabKid)) - if err != nil { - return nil, err - } - if rawEntry == nil { - return nil, fmt.Errorf("%w: no eab found for kid %s", ErrStorageItemNotFound, eabKid) - } - - var eab eabType - err = rawEntry.DecodeJSON(&eab) - if err != nil { - return nil, err - } - - eab.KeyID = eabKid - return &eab, nil -} - -func (a *acmeState) DeleteEab(sc *storageContext, eabKid string) (bool, error) { - rawEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeEabPrefix, eabKid)) - if err != nil { - return false, err - } - if rawEntry == nil { - return false, nil - } - - err = sc.Storage.Delete(sc.Context, path.Join(acmeEabPrefix, eabKid)) - if err != nil { - return false, err - } - return true, nil -} - -func (a *acmeState) ListEabIds(sc *storageContext) ([]string, error) { - entries, err := sc.Storage.List(sc.Context, acmeEabPrefix) - if err != nil { - return nil, err - } - var ids []string - for _, entry := range entries { - if strings.HasSuffix(entry, "/") { - continue - } - ids = append(ids, entry) - } - - return ids, nil -} - -func getAcmeSerialToAccountTrackerPath(accountId string, serial string) string { - return acmeAccountPrefix + accountId + "/certs/" + normalizeSerial(serial) -} - -func getAuthorizationPath(accountId string, authId string) string { - return acmeAccountPrefix + accountId + "/authorizations/" + authId -} - -func getOrderPath(accountId string, 
orderId string) string { - return acmeAccountPrefix + accountId + "/orders/" + orderId -} - -func getACMEToken() (string, error) { - return generateRandomBase64(tokenBytes) -} diff --git a/builtin/logical/pki/acme_state_test.go b/builtin/logical/pki/acme_state_test.go deleted file mode 100644 index 8d4f12127ab20..0000000000000 --- a/builtin/logical/pki/acme_state_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestAcmeNonces(t *testing.T) { - t.Parallel() - - a := NewACMEState() - a.nonces.Initialize() - - // Simple operation should succeed. - nonce, _, err := a.GetNonce() - require.NoError(t, err) - require.NotEmpty(t, nonce) - - require.True(t, a.RedeemNonce(nonce)) - require.False(t, a.RedeemNonce(nonce)) - - // Redeeming in opposite order should work. - var nonces []string - for i := 0; i < len(nonce); i++ { - nonce, _, err = a.GetNonce() - require.NoError(t, err) - require.NotEmpty(t, nonce) - } - - for i := len(nonces) - 1; i >= 0; i-- { - nonce = nonces[i] - require.True(t, a.RedeemNonce(nonce)) - } - - for i := 0; i < len(nonces); i++ { - nonce = nonces[i] - require.False(t, a.RedeemNonce(nonce)) - } -} diff --git a/builtin/logical/pki/acme_wrappers.go b/builtin/logical/pki/acme_wrappers.go deleted file mode 100644 index ef83bbce2c553..0000000000000 --- a/builtin/logical/pki/acme_wrappers.go +++ /dev/null @@ -1,467 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -type acmeContext struct { - // baseUrl is the combination of the configured cluster local URL and the acmePath up to /acme/ - baseUrl *url.URL - clusterUrl *url.URL - sc *storageContext - role *roleEntry - issuer *issuerEntry - // acmeDirectory is a string that can distinguish the various acme directories we have configured - // if something needs to remain locked into a directory path structure. - acmeDirectory string - eabPolicy EabPolicy -} - -func (c acmeContext) getAcmeState() *acmeState { - return c.sc.Backend.acmeState -} - -type ( - acmeOperation func(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) - acmeParsedOperation func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) - acmeAccountRequiredOperation func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, acct *acmeAccount) (*logical.Response, error) -) - -// acmeErrorWrapper the lowest level wrapper that will translate errors into proper ACME error responses -func acmeErrorWrapper(op framework.OperationFunc) framework.OperationFunc { - return func(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) { - resp, err := op(ctx, r, data) - if err != nil { - return TranslateError(err) - } - - return resp, nil - } -} - -// acmeWrapper a basic wrapper that all ACME handlers should leverage as the basis. -// This will create a basic ACME context, validate basic ACME configuration is setup -// for operations. This pulls in acmeErrorWrapper to translate error messages for users, -// but does not enforce any sort of ACME authentication. 
-func (b *backend) acmeWrapper(op acmeOperation) framework.OperationFunc { - return acmeErrorWrapper(func(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, r.Storage) - - config, err := sc.Backend.acmeState.getConfigWithUpdate(sc) - if err != nil { - return nil, fmt.Errorf("failed to fetch ACME configuration: %w", err) - } - - // use string form in case someone messes up our config from raw storage. - eabPolicy, err := getEabPolicyByString(string(config.EabPolicyName)) - if err != nil { - return nil, err - } - - if isAcmeDisabled(sc, config, eabPolicy) { - return nil, ErrAcmeDisabled - } - - if b.useLegacyBundleCaStorage() { - return nil, fmt.Errorf("%w: Can not perform ACME operations until migration has completed", ErrServerInternal) - } - - acmeBaseUrl, clusterBase, err := getAcmeBaseUrl(sc, r) - if err != nil { - return nil, err - } - - role, issuer, err := getAcmeRoleAndIssuer(sc, data, config) - if err != nil { - return nil, err - } - - acmeDirectory, err := getAcmeDirectory(r) - if err != nil { - return nil, err - } - - acmeCtx := &acmeContext{ - baseUrl: acmeBaseUrl, - clusterUrl: clusterBase, - sc: sc, - role: role, - issuer: issuer, - acmeDirectory: acmeDirectory, - eabPolicy: eabPolicy, - } - - return op(acmeCtx, r, data) - }) -} - -// acmeParsedWrapper is an ACME wrapper that will parse out the ACME request parameters, validate -// that we have a proper signature and pass to the operation a decoded map of arguments received. -// This wrapper builds on top of acmeWrapper. Note that this does perform signature verification -// it does not enforce the account being in a valid state nor existing. 
-func (b *backend) acmeParsedWrapper(op acmeParsedOperation) framework.OperationFunc { - return b.acmeWrapper(func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData) (*logical.Response, error) { - user, data, err := b.acmeState.ParseRequestParams(acmeCtx, r, fields) - if err != nil { - return nil, err - } - - resp, err := op(acmeCtx, r, fields, user, data) - - // Our response handlers might not add the necessary headers. - if resp != nil { - if resp.Headers == nil { - resp.Headers = map[string][]string{} - } - - if _, ok := resp.Headers["Replay-Nonce"]; !ok { - nonce, _, err := b.acmeState.GetNonce() - if err != nil { - return nil, err - } - - resp.Headers["Replay-Nonce"] = []string{nonce} - } - - if _, ok := resp.Headers["Link"]; !ok { - resp.Headers["Link"] = genAcmeLinkHeader(acmeCtx) - } else { - directory := genAcmeLinkHeader(acmeCtx)[0] - addDirectory := true - for _, item := range resp.Headers["Link"] { - if item == directory { - addDirectory = false - break - } - } - if addDirectory { - resp.Headers["Link"] = append(resp.Headers["Link"], directory) - } - } - - // ACME responses don't understand Vault's default encoding - // format. Rather than expecting everything to handle creating - // ACME-formatted responses, do the marshaling in one place. 
- if _, ok := resp.Data[logical.HTTPRawBody]; !ok { - ignored_values := map[string]bool{logical.HTTPContentType: true, logical.HTTPStatusCode: true} - fields := map[string]interface{}{} - body := map[string]interface{}{ - logical.HTTPContentType: "application/json", - logical.HTTPStatusCode: http.StatusOK, - } - - for key, value := range resp.Data { - if _, present := ignored_values[key]; !present { - fields[key] = value - } else { - body[key] = value - } - } - - rawBody, err := json.Marshal(fields) - if err != nil { - return nil, fmt.Errorf("Error marshaling JSON body: %w", err) - } - - body[logical.HTTPRawBody] = rawBody - resp.Data = body - } - } - - return resp, err - }) -} - -// acmeAccountRequiredWrapper builds on top of acmeParsedWrapper, enforcing the -// request has a proper signature for an existing account, and that account is -// in a valid status. It passes to the operation a decoded form of the request -// parameters as well as the ACME account the request is for. -func (b *backend) acmeAccountRequiredWrapper(op acmeAccountRequiredOperation) framework.OperationFunc { - return b.acmeParsedWrapper(func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}) (*logical.Response, error) { - if !uc.Existing { - return nil, fmt.Errorf("cannot process request without a 'kid': %w", ErrMalformed) - } - - account, err := requireValidAcmeAccount(acmeCtx, uc) - if err != nil { - return nil, err - } - - return op(acmeCtx, r, fields, uc, data, account) - }) -} - -func requireValidAcmeAccount(acmeCtx *acmeContext, uc *jwsCtx) (*acmeAccount, error) { - account, err := acmeCtx.getAcmeState().LoadAccount(acmeCtx, uc.Kid) - if err != nil { - return nil, fmt.Errorf("error loading account: %w", err) - } - - if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil { - return nil, err - } - - if account.Status != AccountStatusValid { - // Treating "revoked" and "deactivated" as the same here. 
- return nil, fmt.Errorf("%w: account in status: %s", ErrUnauthorized, account.Status) - } - return account, nil -} - -// A helper function that will build up the various path patterns we want for ACME APIs. -func buildAcmeFrameworkPaths(b *backend, patternFunc func(b *backend, pattern string) *framework.Path, acmeApi string) []*framework.Path { - var patterns []*framework.Path - for _, baseUrl := range []string{ - "acme", - "roles/" + framework.GenericNameRegex("role") + "/acme", - "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/acme", - "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/roles/" + framework.GenericNameRegex("role") + "/acme", - } { - - if !strings.HasPrefix(acmeApi, "/") { - acmeApi = "/" + acmeApi - } - - path := patternFunc(b, baseUrl+acmeApi) - patterns = append(patterns, path) - } - - return patterns -} - -func getAcmeBaseUrl(sc *storageContext, r *logical.Request) (*url.URL, *url.URL, error) { - baseUrl, err := getBasePathFromClusterConfig(sc) - if err != nil { - return nil, nil, err - } - - directoryPrefix, err := getAcmeDirectory(r) - if err != nil { - return nil, nil, err - } - - return baseUrl.JoinPath(directoryPrefix), baseUrl, nil -} - -func getBasePathFromClusterConfig(sc *storageContext) (*url.URL, error) { - cfg, err := sc.getClusterConfig() - if err != nil { - return nil, fmt.Errorf("failed loading cluster config: %w", err) - } - - if cfg.Path == "" { - return nil, fmt.Errorf("ACME feature requires local cluster 'path' field configuration to be set") - } - - baseUrl, err := url.Parse(cfg.Path) - if err != nil { - return nil, fmt.Errorf("failed parsing URL configured in local cluster 'path' configuration: %s: %s", - cfg.Path, err.Error()) - } - return baseUrl, nil -} - -func getAcmeIssuer(sc *storageContext, issuerName string) (*issuerEntry, error) { - if issuerName == "" { - issuerName = defaultRef - } - issuerId, err := sc.resolveIssuerReference(issuerName) - if err != nil { - return nil, fmt.Errorf("%w: issuer 
does not exist", ErrMalformed) - } - - issuer, err := sc.fetchIssuerById(issuerId) - if err != nil { - return nil, fmt.Errorf("issuer failed to load: %w", err) - } - - if issuer.Usage.HasUsage(IssuanceUsage) && len(issuer.KeyID) > 0 { - return issuer, nil - } - - return nil, fmt.Errorf("%w: issuer missing proper issuance usage or key", ErrServerInternal) -} - -// getAcmeDirectory return the base acme directory path, without a leading '/' and including -// the trailing /acme/ folder which is the root of all our various directories -func getAcmeDirectory(r *logical.Request) (string, error) { - acmePath := r.Path - if !strings.HasPrefix(acmePath, "/") { - acmePath = "/" + acmePath - } - - lastIndex := strings.LastIndex(acmePath, "/acme/") - if lastIndex == -1 { - return "", fmt.Errorf("%w: unable to determine acme base folder path: %s", ErrServerInternal, acmePath) - } - - // Skip the leading '/' and return our base path with the /acme/ - return strings.TrimLeft(acmePath[0:lastIndex]+"/acme/", "/"), nil -} - -func getAcmeRoleAndIssuer(sc *storageContext, data *framework.FieldData, config *acmeConfigEntry) (*roleEntry, *issuerEntry, error) { - requestedIssuer := getRequestedAcmeIssuerFromPath(data) - requestedRole := getRequestedAcmeRoleFromPath(data) - issuerToLoad := requestedIssuer - - var role *roleEntry - var err error - - if len(requestedRole) == 0 { // Default Directory - policyType, err := getDefaultDirectoryPolicyType(config.DefaultDirectoryPolicy) - if err != nil { - return nil, nil, err - } - switch policyType { - case Forbid: - return nil, nil, fmt.Errorf("%w: default directory not allowed by ACME policy", ErrServerInternal) - case SignVerbatim: - role = buildSignVerbatimRoleWithNoData(&roleEntry{ - Issuer: requestedIssuer, - NoStore: false, - Name: requestedRole, - }) - case Role: - defaultRole, err := getDefaultDirectoryPolicyRole(config.DefaultDirectoryPolicy) - if err != nil { - return nil, nil, err - } - role, err = getAndValidateAcmeRole(sc, 
defaultRole) - if err != nil { - return nil, nil, err - } - } - } else { // Requested Role - role, err = getAndValidateAcmeRole(sc, requestedRole) - if err != nil { - return nil, nil, err - } - - // Check the Requested Role is Allowed - allowAnyRole := len(config.AllowedRoles) == 1 && config.AllowedRoles[0] == "*" - if !allowAnyRole { - - var foundRole bool - for _, name := range config.AllowedRoles { - if name == role.Name { - foundRole = true - break - } - } - - if !foundRole { - return nil, nil, fmt.Errorf("%w: specified role not allowed by ACME policy", ErrServerInternal) - } - } - - } - - // If we haven't loaded an issuer directly from our path and the specified (or default) - // role does specify an issuer prefer the role's issuer rather than the default issuer. - if len(role.Issuer) > 0 && len(requestedIssuer) == 0 { - issuerToLoad = role.Issuer - } - - issuer, err := getAcmeIssuer(sc, issuerToLoad) - if err != nil { - return nil, nil, err - } - - allowAnyIssuer := len(config.AllowedIssuers) == 1 && config.AllowedIssuers[0] == "*" - if !allowAnyIssuer { - var foundIssuer bool - for index, name := range config.AllowedIssuers { - candidateId, err := sc.resolveIssuerReference(name) - if err != nil { - return nil, nil, fmt.Errorf("failed to resolve reference for allowed_issuer entry %d: %w", index, err) - } - - if candidateId == issuer.ID { - foundIssuer = true - break - } - } - - if !foundIssuer { - return nil, nil, fmt.Errorf("%w: specified issuer not allowed by ACME policy", ErrServerInternal) - } - } - - // Override ExtKeyUsage behavior to force it to only be ServerAuth within ACME issued certs - role.ExtKeyUsage = []string{"serverauth"} - role.ExtKeyUsageOIDs = []string{} - role.ServerFlag = true - role.ClientFlag = false - role.CodeSigningFlag = false - role.EmailProtectionFlag = false - - return role, issuer, nil -} - -func getAndValidateAcmeRole(sc *storageContext, requestedRole string) (*roleEntry, error) { - var err error - role, err := 
sc.Backend.getRole(sc.Context, sc.Storage, requestedRole) - if err != nil { - return nil, fmt.Errorf("%w: err loading role", ErrServerInternal) - } - - if role == nil { - return nil, fmt.Errorf("%w: role does not exist", ErrMalformed) - } - - if role.NoStore { - return nil, fmt.Errorf("%w: role can not be used as NoStore is set to true", ErrServerInternal) - } - - return role, nil -} - -func getRequestedAcmeRoleFromPath(data *framework.FieldData) string { - requestedRole := "" - roleNameRaw, present := data.GetOk("role") - if present { - requestedRole = roleNameRaw.(string) - } - return requestedRole -} - -func getRequestedAcmeIssuerFromPath(data *framework.FieldData) string { - requestedIssuer := "" - requestedIssuerRaw, present := data.GetOk(issuerRefParam) - if present { - requestedIssuer = requestedIssuerRaw.(string) - } - return requestedIssuer -} - -func isAcmeDisabled(sc *storageContext, config *acmeConfigEntry, policy EabPolicy) bool { - if !config.Enabled { - return true - } - - disableAcme, nonFatalErr := isPublicACMEDisabledByEnv() - if nonFatalErr != nil { - sc.Backend.Logger().Warn(fmt.Sprintf("could not parse env var '%s'", disableAcmeEnvVar), "error", nonFatalErr) - } - - // The OS environment if true will override any configuration option. - if disableAcme { - if policy.OverrideEnvDisablingPublicAcme() { - return false - } - return true - } - - return false -} diff --git a/builtin/logical/pki/acme_wrappers_test.go b/builtin/logical/pki/acme_wrappers_test.go deleted file mode 100644 index 4182066495c4f..0000000000000 --- a/builtin/logical/pki/acme_wrappers_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" - "github.com/stretchr/testify/require" -) - -// TestACMEIssuerRoleLoading validates the role and issuer loading logic within the base -// ACME wrapper is correct. -func TestACMEIssuerRoleLoading(t *testing.T) { - b, s := CreateBackendWithStorage(t) - - _, err := CBWrite(b, s, "config/cluster", map[string]interface{}{ - "path": "http://localhost:8200/v1/pki", - "aia_path": "http://localhost:8200/cdn/pki", - }) - require.NoError(t, err) - - _, err = CBWrite(b, s, "config/acme", map[string]interface{}{ - "enabled": true, - }) - require.NoError(t, err) - - _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault1.com", - "issuer_name": "issuer-1", - "key_type": "ec", - }) - require.NoError(t, err, "failed creating issuer issuer-1") - - _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault2.com", - "issuer_name": "issuer-2", - "key_type": "ec", - }) - require.NoError(t, err, "failed creating issuer issuer-2") - - _, err = CBWrite(b, s, "roles/role-bad-issuer", map[string]interface{}{ - issuerRefParam: "non-existant", - "no_store": "false", - }) - require.NoError(t, err, "failed creating role role-bad-issuer") - - _, err = CBWrite(b, s, "roles/role-no-store-enabled", map[string]interface{}{ - issuerRefParam: "issuer-2", - "no_store": "true", - }) - require.NoError(t, err, "failed creating role role-no-store-enabled") - - _, err = CBWrite(b, s, "roles/role-issuer-2", map[string]interface{}{ - issuerRefParam: "issuer-2", - "no_store": "false", - }) - require.NoError(t, err, "failed creating role role-issuer-2") - - tc := []struct { - name string - roleName string - issuerName string - expectedIssuerName string - expectErr bool - }{ - {name: "pass-default-use-default", roleName: "", 
issuerName: "", expectedIssuerName: "issuer-1", expectErr: false}, - {name: "pass-role-issuer-2", roleName: "role-issuer-2", issuerName: "", expectedIssuerName: "issuer-2", expectErr: false}, - {name: "pass-issuer-1-no-role", roleName: "", issuerName: "issuer-1", expectedIssuerName: "issuer-1", expectErr: false}, - {name: "fail-role-has-bad-issuer", roleName: "role-bad-issuer", issuerName: "", expectedIssuerName: "", expectErr: true}, - {name: "fail-role-no-store-enabled", roleName: "role-no-store-enabled", issuerName: "", expectedIssuerName: "", expectErr: true}, - {name: "fail-role-no-store-enabled", roleName: "role-no-store-enabled", issuerName: "", expectedIssuerName: "", expectErr: true}, - {name: "fail-role-does-not-exist", roleName: "non-existant", issuerName: "", expectedIssuerName: "", expectErr: true}, - {name: "fail-issuer-does-not-exist", roleName: "", issuerName: "non-existant", expectedIssuerName: "", expectErr: true}, - } - - for _, tt := range tc { - t.Run(tt.name, func(t *testing.T) { - f := b.acmeWrapper(func(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - if tt.roleName != acmeCtx.role.Name { - return nil, fmt.Errorf("expected role %s but got %s", tt.roleName, acmeCtx.role.Name) - } - - if tt.expectedIssuerName != acmeCtx.issuer.Name { - return nil, fmt.Errorf("expected issuer %s but got %s", tt.expectedIssuerName, acmeCtx.issuer.Name) - } - - return nil, nil - }) - - var acmePath string - fieldRaw := map[string]interface{}{} - if tt.issuerName != "" { - fieldRaw[issuerRefParam] = tt.issuerName - acmePath = "issuer/" + tt.issuerName + "/" - } - if tt.roleName != "" { - fieldRaw["role"] = tt.roleName - acmePath = acmePath + "roles/" + tt.roleName + "/" - } - - acmePath = strings.TrimLeft(acmePath+"/acme/directory", "/") - - resp, err := f(context.Background(), &logical.Request{Path: acmePath, Storage: s}, &framework.FieldData{ - Raw: fieldRaw, - Schema: getCsrSignVerbatimSchemaFields(), - }) - 
require.NoError(t, err, "all errors should be re-encoded") - - if tt.expectErr { - require.NotEqual(t, 200, resp.Data[logical.HTTPStatusCode]) - require.Equal(t, ErrorContentType, resp.Data[logical.HTTPContentType]) - } else { - if resp != nil { - t.Fatalf("expected no error got %s", string(resp.Data[logical.HTTPRawBody].([]uint8))) - } - } - }) - } -} diff --git a/builtin/logical/pki/backend.go b/builtin/logical/pki/backend.go index 8caf2f6a2db90..69fda68e540f5 100644 --- a/builtin/logical/pki/backend.go +++ b/builtin/logical/pki/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -14,22 +11,16 @@ import ( atomic2 "go.uber.org/atomic" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/armon/go-metrics" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" ) const ( - operationPrefixPKI = "pki" - operationPrefixPKIIssuer = "pki-issuer" - operationPrefixPKIIssuers = "pki-issuers" - operationPrefixPKIRoot = "pki-root" - noRole = 0 roleOptional = 1 roleRequired = 2 @@ -97,36 +88,20 @@ func Backend(conf *logical.BackendConfig) *backend { "issuer/+/crl/delta/der", "issuer/+/crl/delta/pem", "issuer/+/crl/delta", - "issuer/+/unified-crl/der", - "issuer/+/unified-crl/pem", - "issuer/+/unified-crl", - "issuer/+/unified-crl/delta/der", - "issuer/+/unified-crl/delta/pem", - "issuer/+/unified-crl/delta", "issuer/+/pem", "issuer/+/der", "issuer/+/json", "issuers/", // LIST operations append a '/' to the requested path "ocsp", // OCSP POST "ocsp/*", // OCSP GET - "unified-crl/delta", - "unified-crl/delta/pem", - "unified-crl/pem", - "unified-crl", - "unified-ocsp", // Unified OCSP POST - "unified-ocsp/*", // Unified OCSP GET - - // ACME 
paths are added below }, LocalStorage: []string{ revokedPath, - localDeltaWALPath, + deltaWALPath, legacyCRLPath, - clusterConfigPath, "crls/", "certs/", - acmePathPrefix, }, Root: []string{ @@ -136,15 +111,8 @@ func Backend(conf *logical.BackendConfig) *backend { SealWrapStorage: []string{ legacyCertBundlePath, - legacyCertBundleBackupPath, keyPrefix, }, - - WriteForwardedStorage: []string{ - crossRevocationPath, - unifiedRevocationWritePathPrefix, - unifiedDeltaWALPath, - }, }, Paths: []*framework.Path{ @@ -159,7 +127,6 @@ func Backend(conf *logical.BackendConfig) *backend { pathConfigCA(&b), pathConfigCRL(&b), pathConfigURLs(&b), - pathConfigCluster(&b), pathSignVerbatim(&b), pathSign(&b), pathIssue(&b), @@ -167,7 +134,6 @@ func Backend(conf *logical.BackendConfig) *backend { pathRotateDeltaCRL(&b), pathRevoke(&b), pathRevokeWithKey(&b), - pathListCertsRevoked(&b), pathTidy(&b), pathTidyCancel(&b), pathTidyStatus(&b), @@ -211,15 +177,6 @@ func Backend(conf *logical.BackendConfig) *backend { // OCSP APIs buildPathOcspGet(&b), buildPathOcspPost(&b), - - // CRL Signing - pathResignCrls(&b), - pathSignRevocationList(&b), - - // ACME - pathAcmeConfig(&b), - pathAcmeEabList(&b), - pathAcmeEabDelete(&b), }, Secrets: []*framework.Secret{ @@ -230,58 +187,6 @@ func Backend(conf *logical.BackendConfig) *backend { InitializeFunc: b.initialize, Invalidate: b.invalidate, PeriodicFunc: b.periodicFunc, - Clean: b.cleanup, - } - - // Add ACME paths to backend - var acmePaths []*framework.Path - acmePaths = append(acmePaths, pathAcmeDirectory(&b)...) - acmePaths = append(acmePaths, pathAcmeNonce(&b)...) - acmePaths = append(acmePaths, pathAcmeNewAccount(&b)...) - acmePaths = append(acmePaths, pathAcmeUpdateAccount(&b)...) - acmePaths = append(acmePaths, pathAcmeGetOrder(&b)...) - acmePaths = append(acmePaths, pathAcmeListOrders(&b)...) - acmePaths = append(acmePaths, pathAcmeNewOrder(&b)...) - acmePaths = append(acmePaths, pathAcmeFinalizeOrder(&b)...) 
- acmePaths = append(acmePaths, pathAcmeFetchOrderCert(&b)...) - acmePaths = append(acmePaths, pathAcmeChallenge(&b)...) - acmePaths = append(acmePaths, pathAcmeAuthorization(&b)...) - acmePaths = append(acmePaths, pathAcmeRevoke(&b)...) - acmePaths = append(acmePaths, pathAcmeNewEab(&b)...) // auth'd API that lives underneath the various /acme paths - - for _, acmePath := range acmePaths { - b.Backend.Paths = append(b.Backend.Paths, acmePath) - } - - // Add specific un-auth'd paths for ACME APIs - for _, acmePrefix := range []string{"", "issuer/+/", "roles/+/", "issuer/+/roles/+/"} { - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/directory") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/new-nonce") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/new-account") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/new-order") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/revoke-cert") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/key-change") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/account/+") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/authorization/+") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/challenge/+/+") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/orders") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/order/+") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/order/+/finalize") - b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/order/+/cert") - // We specifically do NOT add acme/new-eab to 
this as it should be auth'd - } - - if constants.IsEnterprise { - // Unified CRL/OCSP paths are ENT only - entOnly := []*framework.Path{ - pathGetIssuerUnifiedCRL(&b), - pathListCertsRevocationQueue(&b), - pathListUnifiedRevoked(&b), - pathFetchUnifiedCRL(&b), - buildPathUnifiedOcspGet(&b), - buildPathUnifiedOcspPost(&b), - } - b.Backend.Paths = append(b.Backend.Paths, entOnly...) } b.tidyCASGuard = new(uint32) @@ -303,18 +208,12 @@ func Backend(conf *logical.BackendConfig) *backend { b.lastTidy = time.Now() // Metrics initialization for count of certificates in storage - b.certCountEnabled = atomic2.NewBool(false) - b.publishCertCountMetrics = atomic2.NewBool(false) b.certsCounted = atomic2.NewBool(false) - b.certCountError = "Initialize Not Yet Run, Cert Counts Unavailable" - b.certCount = &atomic.Uint32{} - b.revokedCertCount = &atomic.Uint32{} + b.certCount = new(uint32) + b.revokedCertCount = new(uint32) b.possibleDoubleCountedSerials = make([]string, 0, 250) b.possibleDoubleCountedRevokedSerials = make([]string, 0, 250) - b.unifiedTransferStatus = newUnifiedTransferStatus() - - b.acmeState = NewACMEState() return &b } @@ -331,14 +230,9 @@ type backend struct { tidyStatus *tidyStatus lastTidy time.Time - unifiedTransferStatus *unifiedTransferStatus - - certCountEnabled *atomic2.Bool - publishCertCountMetrics *atomic2.Bool - certCount *atomic.Uint32 - revokedCertCount *atomic.Uint32 + certCount *uint32 + revokedCertCount *uint32 certsCounted *atomic2.Bool - certCountError string possibleDoubleCountedSerials []string possibleDoubleCountedRevokedSerials []string @@ -347,14 +241,40 @@ type backend struct { // Write lock around issuers and keys. issuersLock sync.RWMutex - - // Context around ACME operations - acmeState *acmeState - acmeAccountLock sync.RWMutex // (Write) Locked on Tidy, (Read) Locked on Account Creation - // TODO: Stress test this - eg. 
creating an order while an account is being revoked } -type roleOperation func(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) +type ( + tidyStatusState int + roleOperation func(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) +) + +const ( + tidyStatusInactive tidyStatusState = iota + tidyStatusStarted = iota + tidyStatusFinished = iota + tidyStatusError = iota + tidyStatusCancelling = iota + tidyStatusCancelled = iota +) + +type tidyStatus struct { + // Parameters used to initiate the operation + safetyBuffer int + tidyCertStore bool + tidyRevokedCerts bool + tidyRevokedAssocs bool + pauseDuration string + + // Status + state tidyStatusState + err error + timeStarted time.Time + timeFinished time.Time + message string + certStoreDeletedCount uint + revokedCertDeletedCount uint + missingIssuerCertCount uint +} const backendHelp = ` The PKI backend dynamically generates X509 server and client certificates. @@ -432,26 +352,15 @@ func (b *backend) initialize(ctx context.Context, _ *logical.InitializationReque return err } - err = b.acmeState.Initialize(b, sc) - if err != nil { - return err - } - // Initialize also needs to populate our certificate and revoked certificate count err = b.initializeStoredCertificateCounts(ctx) if err != nil { - // Don't block/err initialize/startup for metrics. Context on this call can time out due to number of certificates. - b.Logger().Error("Could not initialize stored certificate counts", "error", err) - b.certCountError = err.Error() + return err } return nil } -func (b *backend) cleanup(_ context.Context) { - b.acmeState.validator.Closing <- struct{}{} -} - func (b *backend) initializePKIIssuersStorage(ctx context.Context) error { // Grab the lock prior to the updating of the storage lock preventing us flipping // the storage flag midway through the request stream of other requests. 
@@ -498,12 +407,6 @@ func (b *backend) updatePkiStorageVersion(ctx context.Context, grabIssuersLock b return } - // If this method is called outside the initialize function, like say an - // invalidate func on a performance replica cluster, we should be grabbing - // the issuers lock to offer a consistent view of the storage version while - // other events are processing things. Its unknown what might happen during - // a single event if one part thinks we are in legacy mode, and then later - // on we aren't. if grabIssuersLock { b.issuersLock.Lock() defer b.issuersLock.Unlock() @@ -517,9 +420,6 @@ func (b *backend) updatePkiStorageVersion(ctx context.Context, grabIssuersLock b } func (b *backend) invalidate(ctx context.Context, key string) { - isNotPerfPrimary := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) - switch { case strings.HasPrefix(key, legacyMigrationBundleLogKey): // This is for a secondary cluster to pick up that the migration has completed @@ -548,37 +448,8 @@ func (b *backend) invalidate(ctx context.Context, key string) { case key == "config/crl": // We may need to reload our OCSP status flag b.crlBuilder.markConfigDirty() - case key == storageAcmeConfig: - b.acmeState.markConfigDirty() case key == storageIssuerConfig: b.crlBuilder.invalidateCRLBuildTime() - case strings.HasPrefix(key, crossRevocationPrefix): - split := strings.Split(key, "/") - - if !strings.HasSuffix(key, "/confirmed") { - cluster := split[len(split)-2] - serial := split[len(split)-1] - b.crlBuilder.addCertForRevocationCheck(cluster, serial) - } else { - if len(split) >= 3 { - cluster := split[len(split)-3] - serial := split[len(split)-2] - // Only process confirmations on the perf primary. 
The - // performance secondaries cannot remove other clusters' - // entries, and so do not need to track them (only to - // ignore them). On performance primary nodes though, - // we do want to track them to remove them. - if !isNotPerfPrimary { - b.crlBuilder.addCertForRevocationRemoval(cluster, serial) - } - } - } - case strings.HasPrefix(key, unifiedRevocationReadPathPrefix): - // Three parts to this key: prefix, cluster, and serial. - split := strings.Split(key, "/") - cluster := split[len(split)-2] - serial := split[len(split)-1] - b.crlBuilder.addCertFromCrossRevocation(cluster, serial) } } @@ -598,48 +469,22 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er return nil } - // First handle any global revocation queue entries. - if err := b.crlBuilder.processRevocationQueue(sc); err != nil { - return err - } - - // Then handle any unified cross-cluster revocations. - if err := b.crlBuilder.processCrossClusterRevocations(sc); err != nil { - return err - } - // Check if we're set to auto rebuild and a CRL is set to expire. if err := b.crlBuilder.checkForAutoRebuild(sc); err != nil { return err } // Then attempt to rebuild the CRLs if required. - warnings, err := b.crlBuilder.rebuildIfForced(sc) - if err != nil { + if err := b.crlBuilder.rebuildIfForced(ctx, b, request); err != nil { return err } - if len(warnings) > 0 { - msg := "During rebuild of complete CRL, got the following warnings:" - for index, warning := range warnings { - msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) - } - b.Logger().Warn(msg) - } // If a delta CRL was rebuilt above as part of the complete CRL rebuild, // this will be a no-op. However, if we do need to rebuild delta CRLs, // this would cause us to do so. 
- warnings, err = b.crlBuilder.rebuildDeltaCRLsIfForced(sc, false) - if err != nil { + if err := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, false); err != nil { return err } - if len(warnings) > 0 { - msg := "During rebuild of delta CRL, got the following warnings:" - for index, warning := range warnings { - msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) - } - b.Logger().Warn(msg) - } return nil } @@ -695,35 +540,19 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er return nil } - // First tidy any ACME nonces to free memory. - b.acmeState.DoTidyNonces() - - // Then run unified transfer. - backgroundSc := b.makeStorageContext(context.Background(), b.storage) - go runUnifiedTransfer(backgroundSc) - - // Then run the CRL rebuild and tidy operation. crlErr := doCRL() tidyErr := doAutoTidy() - // Periodically re-emit gauges so that they don't disappear/go stale - tidyConfig, err := sc.getAutoTidyConfig() - if err != nil { - return err + if crlErr != nil && tidyErr != nil { + return fmt.Errorf("Error building CRLs:\n - %v\n\nError running auto-tidy:\n - %v\n", crlErr, tidyErr) } - b.emitCertStoreMetrics(tidyConfig) - var errors error if crlErr != nil { - errors = multierror.Append(errors, fmt.Errorf("Error building CRLs:\n - %w\n", crlErr)) + return fmt.Errorf("Error building CRLs:\n - %v\n", crlErr) } if tidyErr != nil { - errors = multierror.Append(errors, fmt.Errorf("Error running auto-tidy:\n - %w\n", tidyErr)) - } - - if errors != nil { - return errors + return fmt.Errorf("Error running auto-tidy:\n - %v\n", tidyErr) } // Check if the CRL was invalidated due to issuer swap and update @@ -737,51 +566,24 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er } func (b *backend) initializeStoredCertificateCounts(ctx context.Context) error { + b.tidyStatusLock.RLock() + defer b.tidyStatusLock.RUnlock() // For performance reasons, we can't lock on issuance/storage of certs until a list operation 
completes, // but we want to limit possible miscounts / double-counts to over-counting, so we take the tidy lock which // prevents (most) deletions - in particular we take a read lock (sufficient to block the write lock in // tidyStatusStart while allowing tidy to still acquire a read lock to report via its endpoint) - b.tidyStatusLock.RLock() - defer b.tidyStatusLock.RUnlock() - sc := b.makeStorageContext(ctx, b.storage) - config, err := sc.getAutoTidyConfig() - if err != nil { - return err - } - - b.certCountEnabled.Store(config.MaintainCount) - b.publishCertCountMetrics.Store(config.PublishMetrics) - - if config.MaintainCount == false { - b.possibleDoubleCountedRevokedSerials = nil - b.possibleDoubleCountedSerials = nil - b.certsCounted.Store(true) - b.certCount.Store(0) - b.revokedCertCount.Store(0) - b.certCountError = "Cert Count is Disabled: enable via Tidy Config maintain_stored_certificate_counts" - return nil - } - - // Ideally these three things would be set in one transaction, since that isn't possible, set the counts to "0", - // first, so count will over-count (and miss putting things in deduplicate queue), rather than under-count. - b.certCount.Store(0) - b.revokedCertCount.Store(0) - b.possibleDoubleCountedRevokedSerials = nil - b.possibleDoubleCountedSerials = nil - // A cert issued or revoked here will be double-counted. That's okay, this is "best effort" metrics. 
- b.certsCounted.Store(false) entries, err := b.storage.List(ctx, "certs/") if err != nil { return err } - b.certCount.Add(uint32(len(entries))) + atomic.AddUint32(b.certCount, uint32(len(entries))) revokedEntries, err := b.storage.List(ctx, "revoked/") if err != nil { return err } - b.revokedCertCount.Add(uint32(len(revokedEntries))) + atomic.AddUint32(b.revokedCertCount, uint32(len(revokedEntries))) b.certsCounted.Store(true) // Now that the metrics are set, we can switch from appending newly-stored certificates to the possible double-count @@ -862,98 +664,64 @@ func (b *backend) initializeStoredCertificateCounts(ctx context.Context) error { b.possibleDoubleCountedRevokedSerials = nil b.possibleDoubleCountedSerials = nil - b.emitCertStoreMetrics(config) - - b.certCountError = "" + certCount := atomic.LoadUint32(b.certCount) + metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) + revokedCertCount := atomic.LoadUint32(b.revokedCertCount) + metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(revokedCertCount)) return nil } -func (b *backend) emitCertStoreMetrics(config *tidyConfig) { - if config.PublishMetrics == true { - certCount := b.certCount.Load() - b.emitTotalCertCountMetric(certCount) - revokedCertCount := b.revokedCertCount.Load() - b.emitTotalRevokedCountMetric(revokedCertCount) - } -} - // The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: // eg. 
certsCounted := b.certsCounted.Load() -func (b *backend) ifCountEnabledIncrementTotalCertificatesCount(certsCounted bool, newSerial string) { - if b.certCountEnabled.Load() { - certCount := b.certCount.Add(1) - switch { - case !certsCounted: - // This is unsafe, but a good best-attempt - if strings.HasPrefix(newSerial, "certs/") { - newSerial = newSerial[6:] - } - b.possibleDoubleCountedSerials = append(b.possibleDoubleCountedSerials, newSerial) - default: - if b.publishCertCountMetrics.Load() { - b.emitTotalCertCountMetric(certCount) - } - } - } -} - -func (b *backend) ifCountEnabledDecrementTotalCertificatesCountReport() { - if b.certCountEnabled.Load() { - certCount := b.decrementTotalCertificatesCountNoReport() - if b.publishCertCountMetrics.Load() { - b.emitTotalCertCountMetric(certCount) +func (b *backend) incrementTotalCertificatesCount(certsCounted bool, newSerial string) { + certCount := atomic.AddUint32(b.certCount, 1) + switch { + case !certsCounted: + // This is unsafe, but a good best-attempt + if strings.HasPrefix(newSerial, "certs/") { + newSerial = newSerial[6:] } + b.possibleDoubleCountedSerials = append(b.possibleDoubleCountedSerials, newSerial) + default: + metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) } } -func (b *backend) emitTotalCertCountMetric(certCount uint32) { +func (b *backend) decrementTotalCertificatesCountReport() { + certCount := b.decrementTotalCertificatesCountNoReport() metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) } // Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet -// Does not respect whether-we-are-counting backend information. 
func (b *backend) decrementTotalCertificatesCountNoReport() uint32 { - newCount := b.certCount.Add(^uint32(0)) + newCount := atomic.AddUint32(b.certCount, ^uint32(0)) return newCount } // The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: // eg. certsCounted := b.certsCounted.Load() -func (b *backend) ifCountEnabledIncrementTotalRevokedCertificatesCount(certsCounted bool, newSerial string) { - if b.certCountEnabled.Load() { - newRevokedCertCount := b.revokedCertCount.Add(1) - switch { - case !certsCounted: - // This is unsafe, but a good best-attempt - if strings.HasPrefix(newSerial, "revoked/") { // allow passing in the path (revoked/serial) OR the serial - newSerial = newSerial[8:] - } - b.possibleDoubleCountedRevokedSerials = append(b.possibleDoubleCountedRevokedSerials, newSerial) - default: - if b.publishCertCountMetrics.Load() { - b.emitTotalRevokedCountMetric(newRevokedCertCount) - } - } - } -} - -func (b *backend) ifCountEnabledDecrementTotalRevokedCertificatesCountReport() { - if b.certCountEnabled.Load() { - revokedCertCount := b.decrementTotalRevokedCertificatesCountNoReport() - if b.publishCertCountMetrics.Load() { - b.emitTotalRevokedCountMetric(revokedCertCount) +func (b *backend) incrementTotalRevokedCertificatesCount(certsCounted bool, newSerial string) { + newRevokedCertCount := atomic.AddUint32(b.revokedCertCount, 1) + switch { + case !certsCounted: + // This is unsafe, but a good best-attempt + if strings.HasPrefix(newSerial, "revoked/") { // allow passing in the path (revoked/serial) OR the serial + newSerial = newSerial[8:] } + b.possibleDoubleCountedRevokedSerials = append(b.possibleDoubleCountedRevokedSerials, newSerial) + default: + metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(newRevokedCertCount)) } } -func (b *backend) emitTotalRevokedCountMetric(revokedCertCount uint32) { +func (b *backend) 
decrementTotalRevokedCertificatesCountReport() { + revokedCertCount := b.decrementTotalRevokedCertificatesCountNoReport() metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(revokedCertCount)) } // Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet -// Does not respect whether-we-are-counting backend information. func (b *backend) decrementTotalRevokedCertificatesCountNoReport() uint32 { - newRevokedCertCount := b.revokedCertCount.Add(^uint32(0)) + newRevokedCertCount := atomic.AddUint32(b.revokedCertCount, ^uint32(0)) return newRevokedCertCount } diff --git a/builtin/logical/pki/backend_test.go b/builtin/logical/pki/backend_test.go index 6e779199363ef..69a379d79f8bd 100644 --- a/builtin/logical/pki/backend_test.go +++ b/builtin/logical/pki/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -30,15 +27,10 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/teststorage" - - "github.com/hashicorp/vault/helper/testhelpers" - - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - "github.com/stretchr/testify/require" "github.com/armon/go-metrics" @@ -112,7 +104,7 @@ OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= func TestPKI_RequireCN(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ "common_name": "myvault.com", @@ -137,10 +129,9 @@ func TestPKI_RequireCN(t *testing.T) { // Issue a cert with require_cn set to true and with common name supplied. // It should succeed. 
- resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + _, err = CBWrite(b, s, "issue/example", map[string]interface{}{ "common_name": "foobar.com", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issue/example"), logical.UpdateOperation), resp, true) if err != nil { t.Fatal(err) } @@ -189,7 +180,7 @@ func TestPKI_RequireCN(t *testing.T) { func TestPKI_DeviceCert(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ "common_name": "myvault.com", @@ -258,7 +249,7 @@ func TestPKI_DeviceCert(t *testing.T) { func TestBackend_InvalidParameter(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ "common_name": "myvault.com", @@ -281,7 +272,7 @@ func TestBackend_InvalidParameter(t *testing.T) { func TestBackend_CSRValues(t *testing.T) { t.Parallel() initTest.Do(setCerts) - b, _ := CreateBackendWithStorage(t) + b, _ := createBackendWithStorage(t) testCase := logicaltest.TestCase{ LogicalBackend: b, @@ -298,7 +289,7 @@ func TestBackend_CSRValues(t *testing.T) { func TestBackend_URLsCRUD(t *testing.T) { t.Parallel() initTest.Do(setCerts) - b, _ := CreateBackendWithStorage(t) + b, _ := createBackendWithStorage(t) testCase := logicaltest.TestCase{ LogicalBackend: b, @@ -334,7 +325,7 @@ func TestBackend_Roles(t *testing.T) { t.Run(tc.name, func(t *testing.T) { initTest.Do(setCerts) - b, _ := CreateBackendWithStorage(t) + b, _ := createBackendWithStorage(t) testCase := logicaltest.TestCase{ LogicalBackend: b, @@ -475,13 +466,8 @@ func checkCertsAndPrivateKey(keyType string, key crypto.Signer, usage x509.KeyUs } } - // TODO: We incremented 20->25 due to CircleCI execution - // being slow and pausing this test. 
We might consider recording the - // actual issuance time of the cert and calculating the expected - // validity period +/- fuzz, but that'd require recording and passing - // through more information. - if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 25 { - return nil, fmt.Errorf("certificate validity end: %s; expected within 25 seconds of %s", cert.NotAfter.Format(time.RFC3339), time.Now().Add(validity).Format(time.RFC3339)) + if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 20 { + return nil, fmt.Errorf("certificate validity end: %s; expected within 20 seconds of %s", cert.NotAfter.Format(time.RFC3339), time.Now().Add(validity).Format(time.RFC3339)) } return parsedCertBundle, nil @@ -560,6 +546,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s if err != nil { return err } + if !reflect.DeepEqual(entries, expected) { return fmt.Errorf("expected urls\n%#v\ndoes not match provided\n%#v\n", expected, entries) } @@ -612,7 +599,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s certBytes, _ := base64.StdEncoding.DecodeString(certString) certs, err := x509.ParseCertificates(certBytes) if err != nil { - return fmt.Errorf("returned cert cannot be parsed: %w", err) + return fmt.Errorf("returned cert cannot be parsed: %v", err) } if len(certs) != 1 { return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) @@ -665,7 +652,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s certBytes, _ := base64.StdEncoding.DecodeString(certString) certs, err := x509.ParseCertificates(certBytes) if err != nil { - return fmt.Errorf("returned cert cannot be parsed: %w", err) + return fmt.Errorf("returned cert cannot be parsed: %v", err) } if len(certs) != 1 { return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) @@ -691,8 +678,6 @@ func generateURLSteps(t *testing.T, caCert, caKey 
string, intdata, reqdata map[s } func generateCSR(t *testing.T, csrTemplate *x509.CertificateRequest, keyType string, keyBits int) (interface{}, []byte, string) { - t.Helper() - var priv interface{} var err error switch keyType { @@ -788,7 +773,7 @@ func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s certBytes, _ := base64.StdEncoding.DecodeString(certString) certs, err := x509.ParseCertificates(certBytes) if err != nil { - return fmt.Errorf("returned cert cannot be parsed: %w", err) + return fmt.Errorf("returned cert cannot be parsed: %v", err) } if len(certs) != 1 { return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) @@ -824,8 +809,6 @@ func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s } func generateTestCsr(t *testing.T, keyType certutil.PrivateKeyType, keyBits int) (x509.CertificateRequest, string) { - t.Helper() - csrTemplate := x509.CertificateRequest{ Subject: pkix.Name{ Country: []string{"MyCountry"}, @@ -1747,7 +1730,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { func TestRolesAltIssuer(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Create two issuers. 
resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -1845,7 +1828,7 @@ func TestRolesAltIssuer(t *testing.T) { func TestBackend_PathFetchValidRaw(t *testing.T) { t.Parallel() - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -1887,7 +1870,6 @@ func TestBackend_PathFetchValidRaw(t *testing.T) { Data: map[string]interface{}{}, MountPoint: "pki/", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("ca/pem"), logical.ReadOperation), resp, true) require.NoError(t, err) if resp != nil && resp.IsError() { t.Fatalf("failed read ca/pem, %#v", resp) @@ -1978,7 +1960,7 @@ func TestBackend_PathFetchValidRaw(t *testing.T) { func TestBackend_PathFetchCertList(t *testing.T) { t.Parallel() // create the backend - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) // generate root rootData := map[string]interface{}{ @@ -1993,7 +1975,6 @@ func TestBackend_PathFetchCertList(t *testing.T) { Data: rootData, MountPoint: "pki/", }) - if resp != nil && resp.IsError() { t.Fatalf("failed to generate root, %#v", resp) } @@ -2014,16 +1995,6 @@ func TestBackend_PathFetchCertList(t *testing.T) { Data: urlsData, MountPoint: "pki/", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.UpdateOperation), resp, true) - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: "config/urls", - Storage: storage, - MountPoint: "pki/", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.ReadOperation), resp, true) - if resp != nil && resp.IsError() { t.Fatalf("failed to config urls, %#v", resp) } @@ -2133,7 +2104,7 @@ func TestBackend_SignVerbatim(t *testing.T) { func runTestSignVerbatim(t *testing.T, keyType string) { // create the backend - b, 
storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) // generate root rootData := map[string]interface{}{ @@ -2164,16 +2135,6 @@ func runTestSignVerbatim(t *testing.T, keyType string) { Subject: pkix.Name{ CommonName: "foo.bar.com", }, - // Check that otherName extensions are not duplicated (see hashicorp/vault#16700). - // If these extensions are duplicated, sign-verbatim will fail when parsing the signed certificate on Go 1.19+ (see golang/go#50988). - // On older versions of Go this test will fail due to an explicit check for duplicate otherNames later in this test. - ExtraExtensions: []pkix.Extension{ - { - Id: oidExtensionSubjectAltName, - Critical: false, - Value: []byte{0x30, 0x26, 0xA0, 0x24, 0x06, 0x0A, 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x14, 0x02, 0x03, 0xA0, 0x16, 0x0C, 0x14, 0x75, 0x73, 0x65, 0x72, 0x6E, 0x61, 0x6D, 0x65, 0x40, 0x65, 0x78, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x2E, 0x63, 0x6F, 0x6D}, - }, - }, } csr, err := x509.CreateCertificateRequest(rand.Reader, csrReq, key) if err != nil { @@ -2203,8 +2164,6 @@ func runTestSignVerbatim(t *testing.T, keyType string) { Data: signVerbatimData, MountPoint: "pki/", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign-verbatim"), logical.UpdateOperation), resp, true) - if resp != nil && resp.IsError() { t.Fatalf("failed to sign-verbatim basic CSR: %#v", *resp) } @@ -2326,19 +2285,6 @@ func runTestSignVerbatim(t *testing.T, keyType string) { t.Fatalf("expected a single cert, got %d", len(certs)) } cert = certs[0] - - // Fallback check for duplicate otherName, necessary on Go versions before 1.19. - // We assume that there is only one SAN in the original CSR and that it is an otherName. 
- san_count := 0 - for _, ext := range cert.Extensions { - if ext.Id.Equal(oidExtensionSubjectAltName) { - san_count += 1 - } - } - if san_count != 1 { - t.Fatalf("expected one SAN extension, got %d", san_count) - } - notAfter := cert.NotAfter.Format(time.RFC3339) if notAfter != "9999-12-31T23:59:59Z" { t.Fatal(fmt.Errorf("not after from certificate is not matching with input parameter")) @@ -2390,7 +2336,7 @@ func runTestSignVerbatim(t *testing.T, keyType string) { func TestBackend_Root_Idempotency(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // This is a change within 1.11, we are no longer idempotent across generate/internal calls. resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -2438,7 +2384,6 @@ func TestBackend_Root_Idempotency(t *testing.T) { // Now because the issued CA's have no links, the call to ca_chain should return the same data (ca chain from default) resp, err = CBRead(b, s, "cert/ca_chain") require.NoError(t, err, "error reading ca_chain: %v", err) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("cert/ca_chain"), logical.ReadOperation), resp, true) r2Data := resp.Data if !reflect.DeepEqual(r1Data, r2Data) { @@ -2450,31 +2395,15 @@ func TestBackend_Root_Idempotency(t *testing.T) { resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ "pem_bundle": pemBundleRootCA, }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/ca"), logical.UpdateOperation), resp, true) - require.NoError(t, err) require.NotNil(t, resp, "expected ca info") - firstMapping := resp.Data["mapping"].(map[string]string) firstImportedKeys := resp.Data["imported_keys"].([]string) firstImportedIssuers := resp.Data["imported_issuers"].([]string) - firstExistingKeys := resp.Data["existing_keys"].([]string) - firstExistingIssuers := resp.Data["existing_issuers"].([]string) require.NotContains(t, firstImportedKeys, keyId1) require.NotContains(t, 
firstImportedKeys, keyId2) require.NotContains(t, firstImportedIssuers, issuerId1) require.NotContains(t, firstImportedIssuers, issuerId2) - require.Empty(t, firstExistingKeys) - require.Empty(t, firstExistingIssuers) - require.NotEmpty(t, firstMapping) - require.Equal(t, 1, len(firstMapping)) - - var issuerId3 string - var keyId3 string - for i, k := range firstMapping { - issuerId3 = i - keyId3 = k - } // Performing this again should result in no key/issuer ids being imported/generated. resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ @@ -2482,17 +2411,11 @@ func TestBackend_Root_Idempotency(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, resp, "expected ca info") - secondMapping := resp.Data["mapping"].(map[string]string) secondImportedKeys := resp.Data["imported_keys"] secondImportedIssuers := resp.Data["imported_issuers"] - secondExistingKeys := resp.Data["existing_keys"] - secondExistingIssuers := resp.Data["existing_issuers"] - require.Empty(t, secondImportedKeys) - require.Empty(t, secondImportedIssuers) - require.Contains(t, secondExistingKeys, keyId3) - require.Contains(t, secondExistingIssuers, issuerId3) - require.Equal(t, 1, len(secondMapping)) + require.Nil(t, secondImportedKeys) + require.Nil(t, secondImportedIssuers) resp, err = CBDelete(b, s, "root") require.NoError(t, err) @@ -2532,10 +2455,10 @@ func TestBackend_Root_Idempotency(t *testing.T) { } } -func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { +func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { t.Parallel() - b_root, s_root := CreateBackendWithStorage(t) - b_int, s_int := CreateBackendWithStorage(t) + b_root, s_root := createBackendWithStorage(t) + b_int, s_int := createBackendWithStorage(t) var err error // Direct issuing from root @@ -2550,7 +2473,6 @@ func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { _, err = CBWrite(b_root, s_root, "roles/test", map[string]interface{}{ "allow_bare_domains": true, 
"allow_subdomains": true, - "allow_any_name": true, }) if err != nil { t.Fatal(err) @@ -2559,7 +2481,6 @@ func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { resp, err := CBWrite(b_int, s_int, "intermediate/generate/internal", map[string]interface{}{ "common_name": "myint.com", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b_root.Route("intermediate/generate/internal"), logical.UpdateOperation), resp, true) require.Contains(t, resp.Data, "key_id") intKeyId := resp.Data["key_id"].(keyID) csr := resp.Data["csr"] @@ -2578,7 +2499,9 @@ func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { "csr": csr, "ttl": "60h", }) - require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") + if err == nil { + t.Fatal("expected error") + } _, err = CBWrite(b_root, s_root, "sign-verbatim/test", map[string]interface{}{ "common_name": "myint.com", @@ -2586,7 +2509,9 @@ func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { "csr": csr, "ttl": "60h", }) - require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") + if err == nil { + t.Fatal("expected error") + } resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ "common_name": "myint.com", @@ -2612,7 +2537,7 @@ func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { func TestBackend_ConsulSignLeafWithLegacyRole(t *testing.T) { t.Parallel() // create the backend - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // generate root data, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -2647,7 +2572,7 @@ func TestBackend_ConsulSignLeafWithLegacyRole(t *testing.T) { func TestBackend_SignSelfIssued(t *testing.T) { t.Parallel() // create the backend - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) // generate root rootData := map[string]interface{}{ @@ -2744,7 +2669,6 @@ func 
TestBackend_SignSelfIssued(t *testing.T) { }, MountPoint: "pki/", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("root/sign-self-issued"), logical.UpdateOperation), resp, true) if err != nil { t.Fatal(err) } @@ -2789,7 +2713,7 @@ func TestBackend_SignSelfIssued(t *testing.T) { func TestBackend_SignSelfIssued_DifferentTypes(t *testing.T) { t.Parallel() // create the backend - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) // generate root rootData := map[string]interface{}{ @@ -2914,7 +2838,7 @@ func TestBackend_SignSelfIssued_DifferentTypes(t *testing.T) { // easy to validate. func TestBackend_OID_SANs(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) var err error var resp *logical.Response @@ -3137,7 +3061,7 @@ func TestBackend_OID_SANs(t *testing.T) { func TestBackend_AllowedSerialNumbers(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) var err error var resp *logical.Response @@ -3244,7 +3168,7 @@ func TestBackend_AllowedSerialNumbers(t *testing.T) { func TestBackend_URI_SANs(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) var err error @@ -3696,7 +3620,6 @@ func TestReadWriteDeleteRoles(t *testing.T) { "code_signing_flag": false, "issuer_ref": "default", "cn_validations": []interface{}{"email", "hostname"}, - "allowed_user_ids": []interface{}{}, } if diff := deep.Equal(expectedData, resp.Data); len(diff) > 0 { @@ -3868,15 +3791,6 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { t.Fatal(err) } - // Set up Metric Configuration, then restart to enable it - _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ - "maintain_stored_certificate_counts": true, - "publish_stored_certificate_count_metrics": true, - }) - _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ - 
"mounts": "pki/", - }) - // Check the metrics initialized in order to calculate backendUUID for /pki // BackendUUID not consistent during tests with UUID from /sys/mounts/pki metricsSuffix := "total_certificates_stored" @@ -3913,14 +3827,6 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { if err != nil { t.Fatal(err) } - // Set up Metric Configuration, then restart to enable it - _, err = client.Logical().Write("pki2/config/auto-tidy", map[string]interface{}{ - "maintain_stored_certificate_counts": true, - "publish_stored_certificate_count_metrics": true, - }) - _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ - "mounts": "pki2/", - }) // Create a CSR for the intermediate CA secret, err := client.Logical().Write("pki2/intermediate/generate/internal", nil) @@ -3946,7 +3852,6 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { if err != nil { t.Fatal(err) } - if secret == nil || len(secret.Data) == 0 || len(secret.Data["certificate"].(string)) == 0 { t.Fatal("expected certificate information from read operation") } @@ -4029,36 +3934,20 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { } expectedData := map[string]interface{}{ "safety_buffer": json.Number("1"), - "issuer_safety_buffer": json.Number("31536000"), - "revocation_queue_safety_buffer": json.Number("172800"), "tidy_cert_store": true, "tidy_revoked_certs": true, "tidy_revoked_cert_issuer_associations": false, - "tidy_expired_issuers": false, - "tidy_move_legacy_ca_bundle": false, - "tidy_revocation_queue": false, - "tidy_cross_cluster_revoked_certs": false, "pause_duration": "0s", "state": "Finished", "error": nil, "time_started": nil, "time_finished": nil, - "last_auto_tidy_finished": nil, "message": nil, "cert_store_deleted_count": json.Number("1"), "revoked_cert_deleted_count": json.Number("1"), "missing_issuer_cert_count": json.Number("0"), "current_cert_store_count": json.Number("0"), "current_revoked_cert_count": 
json.Number("0"), - "revocation_queue_deleted_count": json.Number("0"), - "cross_revoked_cert_deleted_count": json.Number("0"), - "internal_backend_uuid": backendUUID, - "tidy_acme": false, - "acme_account_safety_buffer": json.Number("2592000"), - "acme_orders_deleted_count": json.Number("0"), - "acme_account_revoked_count": json.Number("0"), - "acme_account_deleted_count": json.Number("0"), - "total_acme_account_count": json.Number("0"), } // Let's copy the times from the response so that we can use deep.Equal() timeStarted, ok := tidyStatus.Data["time_started"] @@ -4071,7 +3960,6 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { t.Fatal("Expected tidy status response to include a value for time_finished") } expectedData["time_finished"] = timeFinished - expectedData["last_auto_tidy_finished"] = tidyStatus.Data["last_auto_tidy_finished"] if diff := deep.Equal(expectedData, tidyStatus.Data); diff != nil { t.Fatal(diff) @@ -4160,7 +4048,7 @@ func TestBackend_Root_FullCAChain(t *testing.T) { func runFullCAChainTest(t *testing.T, keyType string) { // Generate a root CA at /pki-root - b_root, s_root := CreateBackendWithStorage(t) + b_root, s_root := createBackendWithStorage(t) var err error @@ -4206,7 +4094,7 @@ func runFullCAChainTest(t *testing.T, keyType string) { requireCertInCaChainArray(t, fullChainArray, rootCert, "expected root cert within root issuance pki-root/issue/example") // Now generate an intermediate at /pki-intermediate, signed by the root. - b_int, s_int := CreateBackendWithStorage(t) + b_int, s_int := createBackendWithStorage(t) resp, err = CBWrite(b_int, s_int, "intermediate/generate/exported", map[string]interface{}{ "common_name": "intermediate myvault.com", @@ -4286,7 +4174,7 @@ func runFullCAChainTest(t *testing.T, keyType string) { // Finally, import this signing cert chain into a new mount to ensure // "external" CAs behave as expected. 
- b_ext, s_ext := CreateBackendWithStorage(t) + b_ext, s_ext := createBackendWithStorage(t) _, err = CBWrite(b_ext, s_ext, "config/ca", map[string]interface{}{ "pem_bundle": intermediateKey + "\n" + intermediateCert + "\n" + rootCert + "\n", @@ -4625,7 +4513,7 @@ func TestBackend_Roles_IssuanceRegression(t *testing.T) { t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases)) } - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // We need a RSA key so all signature sizes are valid with it. resp, err := CBWrite(b, s, "root/generate/exported", map[string]interface{}{ @@ -4794,7 +4682,7 @@ func TestBackend_Roles_KeySizeRegression(t *testing.T) { t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases)) } - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) tested := 0 for index, test := range testCases { @@ -4806,7 +4694,7 @@ func TestBackend_Roles_KeySizeRegression(t *testing.T) { func TestRootWithExistingKey(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) var err error // Fail requests if type is existing, and we specify the key_type param @@ -4857,7 +4745,6 @@ func TestRootWithExistingKey(t *testing.T) { "key_type": "rsa", "issuer_name": "my-issuer1", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/generate/root/internal"), logical.UpdateOperation), resp, true) require.NoError(t, err) require.NotNil(t, resp.Data["certificate"]) myIssuerId1 := resp.Data["issuer_id"] @@ -4940,7 +4827,7 @@ func TestRootWithExistingKey(t *testing.T) { func TestIntermediateWithExistingKey(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) var err error @@ -4973,7 +4860,6 @@ func TestIntermediateWithExistingKey(t *testing.T) { "common_name": "root myvault.com", "key_type": "rsa", }) - schema.ValidateResponse(t, 
schema.GetResponseSchema(t, b.Route("issuers/generate/intermediate/internal"), logical.UpdateOperation), resp, true) require.NoError(t, err) // csr1 := resp.Data["csr"] myKeyId1 := resp.Data["key_id"] @@ -5006,7 +4892,7 @@ func TestIntermediateWithExistingKey(t *testing.T) { func TestIssuanceTTLs(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ "common_name": "root example.com", @@ -5084,7 +4970,7 @@ func TestIssuanceTTLs(t *testing.T) { func TestSealWrappedStorageConfigured(t *testing.T) { t.Parallel() - b, _ := CreateBackendWithStorage(t) + b, _ := createBackendWithStorage(t) wrappedEntries := b.Backend.PathsSpecial.SealWrapStorage // Make sure our legacy bundle is within the list @@ -5096,7 +4982,7 @@ func TestSealWrappedStorageConfigured(t *testing.T) { func TestBackend_ConfigCA_WithECParams(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Generated key with OpenSSL: // $ openssl ecparam -out p256.key -name prime256v1 -genkey @@ -5125,7 +5011,7 @@ AwEHoUQDQgAE57NX8bR/nDoW8yRgLswoXBQcjHrdyfuHS0gPwki6BNnfunUzryVb func TestPerIssuerAIA(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Generating a root without anything should not have AIAs. resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -5141,11 +5027,9 @@ func TestPerIssuerAIA(t *testing.T) { require.Empty(t, rootCert.CRLDistributionPoints) // Set some local URLs on the issuer. 
- resp, err = CBWrite(b, s, "issuer/default", map[string]interface{}{ + _, err = CBWrite(b, s, "issuer/default", map[string]interface{}{ "issuing_certificates": []string{"https://google.com"}, }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/default"), logical.UpdateOperation), resp, true) - require.NoError(t, err) _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ @@ -5212,7 +5096,7 @@ func TestPerIssuerAIA(t *testing.T) { func TestIssuersWithoutCRLBits(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Importing a root without CRL signing bits should work fine. customBundleWithoutCRLBits := ` @@ -5267,7 +5151,6 @@ TgM7RZnmEjNdeaa4M52o7VY= resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ "pem_bundle": customBundleWithoutCRLBits, }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/import/bundle"), logical.UpdateOperation), resp, true) require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data) @@ -5309,8 +5192,7 @@ func TestBackend_IfModifiedSinceHeaders(t *testing.T) { }, } cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - RequestResponseCallback: schema.ResponseValidatingCallback(t), + HandlerFunc: vaulthttp.Handler, }) cluster.Start() defer cluster.Cleanup() @@ -5599,17 +5481,17 @@ func TestBackend_IfModifiedSinceHeaders(t *testing.T) { lastHeaders = client.Headers() } + time.Sleep(4 * time.Second) + + beforeDeltaRotation := time.Now().Add(-2 * time.Second) + // Finally, rebuild the delta CRL and ensure that only that is - // invalidated. We first need to enable it though, and wait for - // all CRLs to rebuild. + // invalidated. We first need to enable it though. 
_, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ "auto_rebuild": true, "enable_delta": true, }) require.NoError(t, err) - time.Sleep(4 * time.Second) - beforeDeltaRotation := time.Now().Add(-2 * time.Second) - resp, err = client.Logical().Read("pki/crl/rotate-delta") require.NoError(t, err) require.NotNil(t, resp) @@ -5659,7 +5541,7 @@ func TestBackend_IfModifiedSinceHeaders(t *testing.T) { func TestBackend_InitializeCertificateCounts(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) ctx := context.Background() // Set up an Issuer and Role @@ -5698,14 +5580,6 @@ func TestBackend_InitializeCertificateCounts(t *testing.T) { serials[i] = resp.Data["serial_number"].(string) } - // Turn on certificate counting: - CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ - "maintain_stored_certificate_counts": true, - "publish_stored_certificate_count_metrics": false, - }) - // Assert initialize from clean is correct: - b.initializeStoredCertificateCounts(ctx) - // Revoke certificates A + B revocations := serials[0:2] for _, key := range revocations { @@ -5717,16 +5591,18 @@ func TestBackend_InitializeCertificateCounts(t *testing.T) { } } - if b.certCount.Load() != 6 { - t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", b.certCount.Load()) + // Assert initialize from clean is correct: + b.initializeStoredCertificateCounts(ctx) + if atomic.LoadUint32(b.certCount) != 6 { + t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", atomic.LoadUint32(b.certCount)) } - if b.revokedCertCount.Load() != 2 { - t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", b.revokedCertCount.Load()) + if atomic.LoadUint32(b.revokedCertCount) != 2 { + t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", atomic.LoadUint32(b.revokedCertCount)) } // Simulates listing while initialize in 
progress, by "restarting it" - b.certCount.Store(0) - b.revokedCertCount.Store(0) + atomic.StoreUint32(b.certCount, 0) + atomic.StoreUint32(b.revokedCertCount, 0) b.certsCounted.Store(false) // Revoke certificates C, D @@ -5755,12 +5631,12 @@ func TestBackend_InitializeCertificateCounts(t *testing.T) { b.initializeStoredCertificateCounts(ctx) // Test certificate count - if b.certCount.Load() != 8 { - t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", b.certCount.Load()) + if atomic.LoadUint32(b.certCount) != 8 { + t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", *(b.certCount)) } - if b.revokedCertCount.Load() != 4 { - t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", b.revokedCertCount.Load()) + if atomic.LoadUint32(b.revokedCertCount) != 4 { + t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", *(b.revokedCertCount)) } return @@ -5771,7 +5647,7 @@ func TestBackend_InitializeCertificateCounts(t *testing.T) { // for fields across the two APIs. func TestBackend_VerifyIssuerUpdateDefaultsMatchCreation(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ "common_name": "myvault.com", @@ -5800,7 +5676,7 @@ func TestBackend_VerifyIssuerUpdateDefaultsMatchCreation(t *testing.T) { func TestBackend_VerifyPSSKeysIssuersFailImport(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // PKCS8 parsing fails on this key due to rsaPSS OID rsaOIDKey := ` @@ -6003,7 +5879,7 @@ EBuOIhCv6WiwVyGeTVynuHYkHyw3rIL/zU7N8+zIFV2G2M1UAv5D/eyh/74cr9Of func TestPKI_EmptyCRLConfigUpgraded(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Write an empty CRLConfig into storage. 
crlConfigEntry, err := logical.StorageEntryJSON("config/crl", &crlConfig{}) @@ -6024,498 +5900,6 @@ func TestPKI_EmptyCRLConfigUpgraded(t *testing.T) { require.Equal(t, resp.Data["delta_rebuild_interval"], defaultCrlConfig.DeltaRebuildInterval) } -func TestPKI_ListRevokedCerts(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Test empty cluster - resp, err := CBList(b, s, "certs/revoked") - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("certs/revoked"), logical.ListOperation), resp, true) - requireSuccessNonNilResponse(t, resp, err, "failed listing empty cluster") - require.Empty(t, resp.Data, "response map contained data that we did not expect") - - // Set up a mount that we can revoke under (We will create 3 leaf certs, 2 of which will be revoked) - resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "test.com", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "error generating root CA") - requireFieldsSetInResp(t, resp, "serial_number") - issuerSerial := resp.Data["serial_number"] - - resp, err = CBWrite(b, s, "roles/test", map[string]interface{}{ - "allowed_domains": "test.com", - "allow_subdomains": "true", - "max_ttl": "1h", - }) - requireSuccessNonNilResponse(t, resp, err, "error setting up pki role") - - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "test1.test.com", - }) - requireSuccessNonNilResponse(t, resp, err, "error issuing cert 1") - requireFieldsSetInResp(t, resp, "serial_number") - serial1 := resp.Data["serial_number"] - - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "test2.test.com", - }) - requireSuccessNonNilResponse(t, resp, err, "error issuing cert 2") - requireFieldsSetInResp(t, resp, "serial_number") - serial2 := resp.Data["serial_number"] - - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "test3.test.com", - }) - 
requireSuccessNonNilResponse(t, resp, err, "error issuing cert 2") - requireFieldsSetInResp(t, resp, "serial_number") - serial3 := resp.Data["serial_number"] - - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{"serial_number": serial1}) - requireSuccessNonNilResponse(t, resp, err, "error revoking cert 1") - - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{"serial_number": serial2}) - requireSuccessNonNilResponse(t, resp, err, "error revoking cert 2") - - // Test that we get back the expected revoked serial numbers. - resp, err = CBList(b, s, "certs/revoked") - requireSuccessNonNilResponse(t, resp, err, "failed listing revoked certs") - requireFieldsSetInResp(t, resp, "keys") - revokedKeys := resp.Data["keys"].([]string) - - require.Contains(t, revokedKeys, serial1) - require.Contains(t, revokedKeys, serial2) - require.Equal(t, 2, len(revokedKeys), "Expected 2 revoked entries got %d: %v", len(revokedKeys), revokedKeys) - - // Test that listing our certs returns a different response - resp, err = CBList(b, s, "certs") - requireSuccessNonNilResponse(t, resp, err, "failed listing written certs") - requireFieldsSetInResp(t, resp, "keys") - certKeys := resp.Data["keys"].([]string) - - require.Contains(t, certKeys, serial1) - require.Contains(t, certKeys, serial2) - require.Contains(t, certKeys, serial3) - require.Contains(t, certKeys, issuerSerial) - require.Equal(t, 4, len(certKeys), "Expected 4 cert entries got %d: %v", len(certKeys), certKeys) -} - -func TestPKI_TemplatedAIAs(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Setting templated AIAs should succeed. 
- resp, err := CBWrite(b, s, "config/cluster", map[string]interface{}{ - "path": "http://localhost:8200/v1/pki", - "aia_path": "http://localhost:8200/cdn/pki", - }) - require.NoError(t, err) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.UpdateOperation), resp, true) - - resp, err = CBRead(b, s, "config/cluster") - require.NoError(t, err) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.ReadOperation), resp, true) - - aiaData := map[string]interface{}{ - "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", - "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", - "ocsp_servers": "{{cluster_path}}/ocsp", - "enable_templating": true, - } - _, err = CBWrite(b, s, "config/urls", aiaData) - require.NoError(t, err) - - // But root generation will fail. - rootData := map[string]interface{}{ - "common_name": "Long-Lived Root X1", - "issuer_name": "long-root-x1", - "key_type": "ec", - } - _, err = CBWrite(b, s, "root/generate/internal", rootData) - require.Error(t, err) - require.Contains(t, err.Error(), "unable to parse AIA URL") - - // Clearing the config and regenerating the root should succeed. - _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ - "crl_distribution_points": "", - "issuing_certificates": "", - "ocsp_servers": "", - "enable_templating": false, - }) - require.NoError(t, err) - resp, err = CBWrite(b, s, "root/generate/internal", rootData) - requireSuccessNonNilResponse(t, resp, err) - issuerId := string(resp.Data["issuer_id"].(issuerID)) - - // Now write the original AIA config and sign a leaf. 
- _, err = CBWrite(b, s, "config/urls", aiaData) - require.NoError(t, err) - _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allow_any_name": "true", - "key_type": "ec", - "ttl": "50m", - }) - require.NoError(t, err) - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "example.com", - }) - requireSuccessNonNilResponse(t, resp, err) - - // Validate the AIA info is correctly templated. - cert := parseCert(t, resp.Data["certificate"].(string)) - require.Equal(t, cert.OCSPServer, []string{"http://localhost:8200/v1/pki/ocsp"}) - require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost:8200/cdn/pki/issuer/" + issuerId + "/der"}) - require.Equal(t, cert.CRLDistributionPoints, []string{"http://localhost:8200/v1/pki/issuer/" + issuerId + "/crl/der"}) - - // Modify our issuer to set custom AIAs: these URLs are bad. - _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "enable_aia_url_templating": "false", - "crl_distribution_points": "a", - "issuing_certificates": "b", - "ocsp_servers": "c", - }) - require.Error(t, err) - - // These URLs are good. - _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "enable_aia_url_templating": "false", - "crl_distribution_points": "http://localhost/a", - "issuing_certificates": "http://localhost/b", - "ocsp_servers": "http://localhost/c", - }) - - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "example.com", - }) - requireSuccessNonNilResponse(t, resp, err) - - // Validate the AIA info is correctly templated. - cert = parseCert(t, resp.Data["certificate"].(string)) - require.Equal(t, cert.OCSPServer, []string{"http://localhost/c"}) - require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost/b"}) - require.Equal(t, cert.CRLDistributionPoints, []string{"http://localhost/a"}) - - // These URLs are bad, but will fail at issuance time due to AIA templating. 
- resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "enable_aia_url_templating": "true", - "crl_distribution_points": "a", - "issuing_certificates": "b", - "ocsp_servers": "c", - }) - requireSuccessNonNilResponse(t, resp, err) - require.NotEmpty(t, resp.Warnings) - _, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "example.com", - }) - require.Error(t, err) -} - -func requireSubjectUserIDAttr(t *testing.T, cert string, target string) { - xCert := parseCert(t, cert) - - for _, attr := range xCert.Subject.Names { - var userID string - if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) { - if target == "" { - t.Fatalf("expected no UserID (OID: %v) subject attributes in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, cert) - } - - switch aValue := attr.Value.(type) { - case string: - userID = aValue - case []byte: - userID = string(aValue) - default: - t.Fatalf("unknown type for UserID attribute: %v\nCert: %v", attr, cert) - } - - if userID == target { - return - } - } - } - - if target != "" { - t.Fatalf("failed to find UserID (OID: %v) matching %v in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, target, cert) - } -} - -func TestUserIDsInLeafCerts(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // 1. Setup root issuer. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "Vault Root CA", - "key_type": "ec", - "ttl": "7200h", - }) - requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") - - // 2. Allow no user IDs. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs should work. 
- resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with user ID should fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // 3. Allow any user IDs. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "*", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with one user ID. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Issue cert with two user IDs. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid,robot", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") - - // 4. Allow one specific user ID. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "humanoid", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs. 
- resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with approved ID. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Issue cert with non-approved user ID should fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "robot", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // - Issue cert with one approved and one non-approved should also fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid,robot", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // 5. Allow two specific user IDs. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "humanoid,robot", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with one approved ID. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Issue cert with other user ID. 
- resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "robot", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") - - // - Issue cert with unknown user ID will fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "robot2", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // - Issue cert with both should succeed. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid,robot", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") - - // 6. Use a glob. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "human*", - "key_type": "ec", - "use_csr_sans": true, // setup for further testing. - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with approved ID. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Issue cert with another approved ID. 
- resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "human", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human") - - // - Issue cert with literal glob. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "human*", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human*") - - // - Still no robotic certs are allowed; will fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "robot", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // Create a CSR and validate it works with both sign/ and sign-verbatim. - csrTemplate := x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: "localhost", - ExtraNames: []pkix.AttributeTypeAndValue{ - { - Type: certutil.SubjectPilotUserIDAttributeOID, - Value: "humanoid", - }, - }, - }, - } - _, _, csrPem := generateCSR(t, &csrTemplate, "ec", 256) - - // Should work with role-based signing. - resp, err = CBWrite(b, s, "sign/testing", map[string]interface{}{ - "csr": csrPem, - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign/testing"), logical.UpdateOperation), resp, true) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Definitely will work with sign-verbatim. 
- resp, err = CBWrite(b, s, "sign-verbatim", map[string]interface{}{ - "csr": csrPem, - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") -} - -// TestStandby_Operations test proper forwarding for PKI requests from a standby node to the -// active node within a cluster. -func TestStandby_Operations(t *testing.T) { - conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - }, nil, teststorage.InmemBackendSetup) - cluster := vault.NewTestCluster(t, conf, opts) - cluster.Start() - defer cluster.Cleanup() - - testhelpers.WaitForActiveNodeAndStandbys(t, cluster) - standbyCores := testhelpers.DeriveStandbyCores(t, cluster) - require.Greater(t, len(standbyCores), 0, "Need at least one standby core.") - client := standbyCores[0].Client - - mountPKIEndpoint(t, client, "pki") - - _, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "root-ca.com", - "ttl": "600h", - }) - require.NoError(t, err, "error setting up pki role: %v", err) - - _, err = client.Logical().Write("pki/roles/example", map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "no_store": "false", // make sure we store this cert - "ttl": "5h", - "key_type": "ec", - }) - require.NoError(t, err, "error setting up pki role: %v", err) - - resp, err := client.Logical().Write("pki/issue/example", map[string]interface{}{ - "common_name": "test.example.com", - }) - require.NoError(t, err, "error issuing certificate: %v", err) - require.NotNil(t, resp, "got nil response from issuing request") - serialOfCert := resp.Data["serial_number"].(string) - - resp, err = client.Logical().Write("pki/revoke", map[string]interface{}{ - "serial_number": serialOfCert, - }) - require.NoError(t, err, "error revoking certificate: %v", err) - require.NotNil(t, 
resp, "got nil response from revoke request") -} - type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) func isPermDenied(err error) bool { @@ -6744,7 +6128,7 @@ func TestProperAuthing(t *testing.T) { t.Fatal(err) } serial := resp.Data["serial_number"].(string) - eabKid := "13b80844-e60d-42d2-b7e9-152a8e834b90" + paths := map[string]pathAuthChecker{ "ca_chain": shouldBeUnauthedReadList, "cert/ca_chain": shouldBeUnauthedReadList, @@ -6759,20 +6143,10 @@ func TestProperAuthing(t *testing.T) { "cert/delta-crl": shouldBeUnauthedReadList, "cert/delta-crl/raw": shouldBeUnauthedReadList, "cert/delta-crl/raw/pem": shouldBeUnauthedReadList, - "cert/unified-crl": shouldBeUnauthedReadList, - "cert/unified-crl/raw": shouldBeUnauthedReadList, - "cert/unified-crl/raw/pem": shouldBeUnauthedReadList, - "cert/unified-delta-crl": shouldBeUnauthedReadList, - "cert/unified-delta-crl/raw": shouldBeUnauthedReadList, - "cert/unified-delta-crl/raw/pem": shouldBeUnauthedReadList, "certs": shouldBeAuthed, "certs/revoked": shouldBeAuthed, - "certs/revocation-queue": shouldBeAuthed, - "certs/unified-revoked": shouldBeAuthed, - "config/acme": shouldBeAuthed, "config/auto-tidy": shouldBeAuthed, "config/ca": shouldBeAuthed, - "config/cluster": shouldBeAuthed, "config/crl": shouldBeAuthed, "config/issuers": shouldBeAuthed, "config/keys": shouldBeAuthed, @@ -6800,12 +6174,6 @@ func TestProperAuthing(t *testing.T) { "issuer/default/crl/delta": shouldBeUnauthedReadList, "issuer/default/crl/delta/der": shouldBeUnauthedReadList, "issuer/default/crl/delta/pem": shouldBeUnauthedReadList, - "issuer/default/unified-crl": shouldBeUnauthedReadList, - "issuer/default/unified-crl/pem": shouldBeUnauthedReadList, - "issuer/default/unified-crl/der": shouldBeUnauthedReadList, - "issuer/default/unified-crl/delta": shouldBeUnauthedReadList, - "issuer/default/unified-crl/delta/der": shouldBeUnauthedReadList, - "issuer/default/unified-crl/delta/pem": shouldBeUnauthedReadList, 
"issuer/default/issue/test": shouldBeAuthed, "issuer/default/resign-crls": shouldBeAuthed, "issuer/default/revoke": shouldBeAuthed, @@ -6856,35 +6224,7 @@ func TestProperAuthing(t *testing.T) { "tidy": shouldBeAuthed, "tidy-cancel": shouldBeAuthed, "tidy-status": shouldBeAuthed, - "unified-crl": shouldBeUnauthedReadList, - "unified-crl/pem": shouldBeUnauthedReadList, - "unified-crl/delta": shouldBeUnauthedReadList, - "unified-crl/delta/pem": shouldBeUnauthedReadList, - "unified-ocsp": shouldBeUnauthedWriteOnly, - "unified-ocsp/dGVzdAo=": shouldBeUnauthedReadList, - "eab": shouldBeAuthed, - "eab/" + eabKid: shouldBeAuthed, - } - - // Add ACME based paths to the test suite - for _, acmePrefix := range []string{"", "issuer/default/", "roles/test/", "issuer/default/roles/test/"} { - paths[acmePrefix+"acme/directory"] = shouldBeUnauthedReadList - paths[acmePrefix+"acme/new-nonce"] = shouldBeUnauthedReadList - paths[acmePrefix+"acme/new-account"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"acme/revoke-cert"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"acme/new-order"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"acme/orders"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"acme/account/hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo="] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"acme/authorization/29da8c38-7a09-465e-b9a6-3d76802b1afd"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"acme/challenge/29da8c38-7a09-465e-b9a6-3d76802b1afd/http-01"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"acme/order/13b80844-e60d-42d2-b7e9-152a8e834b90"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"acme/order/13b80844-e60d-42d2-b7e9-152a8e834b90/finalize"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"acme/order/13b80844-e60d-42d2-b7e9-152a8e834b90/cert"] = shouldBeUnauthedWriteOnly - - // Make sure this new-eab path is auth'd - paths[acmePrefix+"acme/new-eab"] = shouldBeAuthed } - for path, checkerType := range paths { checker := pathAuthChckerMap[checkerType] 
checker(t, client, "pki/"+path, token) @@ -6896,6 +6236,13 @@ func TestProperAuthing(t *testing.T) { t.Fatalf("failed to get openapi data: %v", err) } + for openapi_path := range openAPIResp.Data["paths"].(map[string]interface{}) { + if strings.HasPrefix(openapi_path, "/pki/") && strings.Contains(openapi_path, "//") { + t.Skipf("Skipping OpenAPI validation due to malformed PKI paths: %v", openapi_path) + return + } + } + validatedPath := false for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) { if !strings.HasPrefix(openapi_path, "/pki/") { @@ -6928,21 +6275,6 @@ func TestProperAuthing(t *testing.T) { if strings.Contains(raw_path, "{serial}") { raw_path = strings.ReplaceAll(raw_path, "{serial}", serial) } - if strings.Contains(raw_path, "acme/account/") && strings.Contains(raw_path, "{kid}") { - raw_path = strings.ReplaceAll(raw_path, "{kid}", "hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo=") - } - if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{auth_id}") { - raw_path = strings.ReplaceAll(raw_path, "{auth_id}", "29da8c38-7a09-465e-b9a6-3d76802b1afd") - } - if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{challenge_type}") { - raw_path = strings.ReplaceAll(raw_path, "{challenge_type}", "http-01") - } - if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{order_id}") { - raw_path = strings.ReplaceAll(raw_path, "{order_id}", "13b80844-e60d-42d2-b7e9-152a8e834b90") - } - if strings.Contains(raw_path, "eab") && strings.Contains(raw_path, "{key_id}") { - raw_path = strings.ReplaceAll(raw_path, "{key_id}", eabKid) - } handler, present := paths[raw_path] if !present { @@ -6981,123 +6313,6 @@ func TestProperAuthing(t *testing.T) { } } -func TestPatchIssuer(t *testing.T) { - t.Parallel() - - type TestCase struct { - Field string - Before interface{} - Patched interface{} - } - testCases := []TestCase{ - { - Field: "issuer_name", - Before: "root", - Patched: "root-new", - }, - { 
- Field: "leaf_not_after_behavior", - Before: "err", - Patched: "permit", - }, - { - Field: "usage", - Before: "crl-signing,issuing-certificates,ocsp-signing,read-only", - Patched: "issuing-certificates,read-only", - }, - { - Field: "revocation_signature_algorithm", - Before: "ECDSAWithSHA256", - Patched: "ECDSAWithSHA384", - }, - { - Field: "issuing_certificates", - Before: []string{"http://localhost/v1/pki-1/ca"}, - Patched: []string{"http://localhost/v1/pki/ca"}, - }, - { - Field: "crl_distribution_points", - Before: []string{"http://localhost/v1/pki-1/crl"}, - Patched: []string{"http://localhost/v1/pki/crl"}, - }, - { - Field: "ocsp_servers", - Before: []string{"http://localhost/v1/pki-1/ocsp"}, - Patched: []string{"http://localhost/v1/pki/ocsp"}, - }, - { - Field: "enable_aia_url_templating", - Before: false, - Patched: true, - }, - { - Field: "manual_chain", - Before: []string(nil), - Patched: []string{"self"}, - }, - } - - for index, testCase := range testCases { - t.Logf("index: %v / tc: %v", index, testCase) - - b, s := CreateBackendWithStorage(t) - - // 1. Setup root issuer. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "Vault Root CA", - "key_type": "ec", - "ttl": "7200h", - "issuer_name": "root", - }) - requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") - id := string(resp.Data["issuer_id"].(issuerID)) - - // 2. Enable Cluster paths - resp, err = CBWrite(b, s, "config/urls", map[string]interface{}{ - "path": "https://localhost/v1/pki", - "aia_path": "http://localhost/v1/pki", - }) - requireSuccessNonNilResponse(t, resp, err, "failed updating AIA config") - - // 3. 
Add AIA information - resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "issuing_certificates": "http://localhost/v1/pki-1/ca", - "crl_distribution_points": "http://localhost/v1/pki-1/crl", - "ocsp_servers": "http://localhost/v1/pki-1/ocsp", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up issuer") - - // 4. Read the issuer before. - resp, err = CBRead(b, s, "issuer/default") - requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer before") - require.Equal(t, testCase.Before, resp.Data[testCase.Field], "bad expectations") - - // 5. Perform modification. - resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - testCase.Field: testCase.Patched, - }) - requireSuccessNonNilResponse(t, resp, err, "failed patching root issuer") - - if testCase.Field != "manual_chain" { - require.Equal(t, testCase.Patched, resp.Data[testCase.Field], "failed persisting value") - } else { - // self->id - require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") - } - - // 6. Ensure it stuck - resp, err = CBRead(b, s, "issuer/default") - requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer after") - - if testCase.Field != "manual_chain" { - require.Equal(t, testCase.Patched, resp.Data[testCase.Field]) - } else { - // self->id - require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") - } - } -} - var ( initTest sync.Once rsaCAKey string diff --git a/builtin/logical/pki/ca_test.go b/builtin/logical/pki/ca_test.go index 7dbffef24774a..2c33397114259 100644 --- a/builtin/logical/pki/ca_test.go +++ b/builtin/logical/pki/ca_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -307,14 +304,6 @@ func runSteps(t *testing.T, rootB, intB *backend, client *api.Client, rootName, if path == "issuer/default/json" { // Preserves the new line. 
expected += "\n" - _, present := resp.Data["issuer_id"] - if !present { - t.Fatalf("expected issuer/default/json to include issuer_id") - } - _, present = resp.Data["issuer_name"] - if !present { - t.Fatalf("expected issuer/default/json to include issuer_name") - } } if diff := deep.Equal(resp.Data["certificate"].(string), expected); diff != nil { t.Fatal(diff) diff --git a/builtin/logical/pki/ca_util.go b/builtin/logical/pki/ca_util.go index 85dc243e58bd4..d2a59f34bdd2f 100644 --- a/builtin/logical/pki/ca_util.go +++ b/builtin/logical/pki/ca_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -61,7 +58,6 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) (exporte AllowedURISANs: []string{"*"}, AllowedOtherSANs: []string{"*"}, AllowedSerialNumbers: []string{"*"}, - AllowedUserIDs: []string{"*"}, OU: data.Get("ou").([]string), Organization: data.Get("organization").([]string), Country: data.Get("country").([]string), @@ -189,7 +185,7 @@ func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (s if kmsRequestedFromFieldData(data) { keyId, err := getManagedKeyId(data) if err != nil { - return "", 0, errors.New("unable to determine managed key id: " + err.Error()) + return "", 0, errors.New("unable to determine managed key id" + err.Error()) } pubKeyManagedKey, err := getManagedKeyPublicKey(sc.Context, sc.Backend, keyId) @@ -264,61 +260,3 @@ func existingKeyGeneratorFromBytes(key *keyEntry) certutil.KeyGenerator { return nil } } - -func buildSignVerbatimRoleWithNoData(role *roleEntry) *roleEntry { - data := &framework.FieldData{ - Raw: map[string]interface{}{}, - Schema: addSignVerbatimRoleFields(map[string]*framework.FieldSchema{}), - } - return buildSignVerbatimRole(data, role) -} - -func buildSignVerbatimRole(data *framework.FieldData, role *roleEntry) *roleEntry { - entry := &roleEntry{ - AllowLocalhost: true, - AllowAnyName: true, - AllowIPSANs: true, 
- AllowWildcardCertificates: new(bool), - EnforceHostnames: false, - KeyType: "any", - UseCSRCommonName: true, - UseCSRSANs: true, - AllowedOtherSANs: []string{"*"}, - AllowedSerialNumbers: []string{"*"}, - AllowedURISANs: []string{"*"}, - AllowedUserIDs: []string{"*"}, - CNValidations: []string{"disabled"}, - GenerateLease: new(bool), - // If adding new fields to be read, update the field list within addSignVerbatimRoleFields - KeyUsage: data.Get("key_usage").([]string), - ExtKeyUsage: data.Get("ext_key_usage").([]string), - ExtKeyUsageOIDs: data.Get("ext_key_usage_oids").([]string), - SignatureBits: data.Get("signature_bits").(int), - UsePSS: data.Get("use_pss").(bool), - } - *entry.AllowWildcardCertificates = true - *entry.GenerateLease = false - - if role != nil { - if role.TTL > 0 { - entry.TTL = role.TTL - } - if role.MaxTTL > 0 { - entry.MaxTTL = role.MaxTTL - } - if role.GenerateLease != nil { - *entry.GenerateLease = *role.GenerateLease - } - if role.NotBeforeDuration > 0 { - entry.NotBeforeDuration = role.NotBeforeDuration - } - entry.NoStore = role.NoStore - entry.Issuer = role.Issuer - } - - if len(entry.Issuer) == 0 { - entry.Issuer = defaultRef - } - - return entry -} diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go index 1795fcab8aac9..5608310637567 100644 --- a/builtin/logical/pki/cert_util.go +++ b/builtin/logical/pki/cert_util.go @@ -1,9 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( + "context" "crypto" "crypto/ecdsa" "crypto/ed25519" @@ -88,11 +86,6 @@ func getFormat(data *framework.FieldData) string { // loading using the legacyBundleShimID and should be used with care. This should be called only once // within the request path otherwise you run the risk of a race condition with the issuer migration on perf-secondaries. 
func (sc *storageContext) fetchCAInfo(issuerRef string, usage issuerUsage) (*certutil.CAInfoBundle, error) { - bundle, _, err := sc.fetchCAInfoWithIssuer(issuerRef, usage) - return bundle, err -} - -func (sc *storageContext) fetchCAInfoWithIssuer(issuerRef string, usage issuerUsage) (*certutil.CAInfoBundle, issuerID, error) { var issuerId issuerID if sc.Backend.useLegacyBundleCaStorage() { @@ -104,16 +97,11 @@ func (sc *storageContext) fetchCAInfoWithIssuer(issuerRef string, usage issuerUs issuerId, err = sc.resolveIssuerReference(issuerRef) if err != nil { // Usually a bad label from the user or mis-configured default. - return nil, IssuerRefNotFound, errutil.UserError{Err: err.Error()} + return nil, errutil.UserError{Err: err.Error()} } } - bundle, err := sc.fetchCAInfoByIssuerId(issuerId, usage) - if err != nil { - return nil, IssuerRefNotFound, err - } - - return bundle, issuerId, nil + return sc.fetchCAInfoByIssuerId(issuerId, usage) } // fetchCAInfoByIssuerId will fetch the CA info, will return an error if no ca info exists for the given issuerId. 
@@ -156,15 +144,15 @@ func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuerID, usage issuerU entries, err := entry.GetAIAURLs(sc) if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)} + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)} } caInfo.URLs = entries return caInfo, nil } -func fetchCertBySerialBigInt(sc *storageContext, prefix string, serial *big.Int) (*logical.StorageEntry, error) { - return fetchCertBySerial(sc, prefix, serialFromBigInt(serial)) +func fetchCertBySerialBigInt(ctx context.Context, b *backend, req *logical.Request, prefix string, serial *big.Int) (*logical.StorageEntry, error) { + return fetchCertBySerial(ctx, b, req, prefix, serialFromBigInt(serial)) } // Allows fetching certificates from the backend; it handles the slightly @@ -172,7 +160,7 @@ func fetchCertBySerialBigInt(sc *storageContext, prefix string, serial *big.Int) // // Support for fetching CA certificates was removed, due to the new issuers // changes. 
-func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.StorageEntry, error) { +func fetchCertBySerial(ctx context.Context, b *backend, req *logical.Request, prefix, serial string) (*logical.StorageEntry, error) { var path, legacyPath string var err error var certEntry *logical.StorageEntry @@ -186,26 +174,17 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor case strings.HasPrefix(prefix, "revoked/"): legacyPath = "revoked/" + colonSerial path = "revoked/" + hyphenSerial - case serial == legacyCRLPath || serial == deltaCRLPath || serial == unifiedCRLPath || serial == unifiedDeltaCRLPath: - warnings, err := sc.Backend.crlBuilder.rebuildIfForced(sc) - if err != nil { + case serial == legacyCRLPath || serial == deltaCRLPath: + if err = b.crlBuilder.rebuildIfForced(ctx, b, req); err != nil { return nil, err } - if len(warnings) > 0 { - msg := "During rebuild of CRL for cert fetch, got the following warnings:" - for index, warning := range warnings { - msg = fmt.Sprintf("%v\n %d. 
%v", msg, index+1, warning) - } - sc.Backend.Logger().Warn(msg) - } - - unified := serial == unifiedCRLPath || serial == unifiedDeltaCRLPath - path, err = sc.resolveIssuerCRLPath(defaultRef, unified) + sc := b.makeStorageContext(ctx, req.Storage) + path, err = sc.resolveIssuerCRLPath(defaultRef) if err != nil { return nil, err } - if serial == deltaCRLPath || serial == unifiedDeltaCRLPath { + if serial == deltaCRLPath { if sc.Backend.useLegacyBundleCaStorage() { return nil, fmt.Errorf("refusing to serve delta CRL with legacy CA bundle") } @@ -217,7 +196,7 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor path = "certs/" + hyphenSerial } - certEntry, err = sc.Storage.Get(sc.Context, path) + certEntry, err = req.Storage.Get(ctx, path) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching certificate %s: %s", serial, err)} } @@ -237,7 +216,7 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor // always manifest on Windows, and thus the initial check for a revoked // cert fails would return an error when the cert isn't revoked, preventing // the happy path from working. 
- certEntry, _ = sc.Storage.Get(sc.Context, legacyPath) + certEntry, _ = req.Storage.Get(ctx, legacyPath) if certEntry == nil { return nil, nil } @@ -247,17 +226,17 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor // Update old-style paths to new-style paths certEntry.Key = path - certsCounted := sc.Backend.certsCounted.Load() - if err = sc.Storage.Put(sc.Context, certEntry); err != nil { + certsCounted := b.certsCounted.Load() + if err = req.Storage.Put(ctx, certEntry); err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("error saving certificate with serial %s to new location", serial)} } - if err = sc.Storage.Delete(sc.Context, legacyPath); err != nil { + if err = req.Storage.Delete(ctx, legacyPath); err != nil { // If we fail here, we have an extra (copy) of a cert in storage, add to metrics: switch { case strings.HasPrefix(prefix, "revoked/"): - sc.Backend.ifCountEnabledIncrementTotalRevokedCertificatesCount(certsCounted, path) + b.incrementTotalRevokedCertificatesCount(certsCounted, path) default: - sc.Backend.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, path) + b.incrementTotalCertificatesCount(certsCounted, path) } return nil, errutil.InternalError{Err: fmt.Sprintf("error deleting certificate with serial %s from old location", serial)} } @@ -336,71 +315,6 @@ func validateCommonName(b *backend, data *inputBundle, name string) string { return "" } -func isWildcardDomain(name string) bool { - // Per RFC 6125 Section 6.4.3, and explicitly contradicting the earlier - // RFC 2818 which no modern client will validate against, there are two - // main types of wildcards, each with a single wildcard specifier (`*`, - // functionally different from the `*` used as a glob from the - // AllowGlobDomains parsing path) in the left-most label: - // - // 1. Entire label is a single wildcard character (most common and - // well-supported), - // 2. Part of the label contains a single wildcard character (e.g. 
per - // RFC 6125: baz*.example.net, *baz.example.net, or b*z.example.net). - // - // We permit issuance of both but not the older RFC 2818 style under - // the new AllowWildcardCertificates option. However, anything with a - // glob character is technically a wildcard, though not a valid one. - - return strings.Contains(name, "*") -} - -func validateWildcardDomain(name string) (string, string, error) { - // See note in isWildcardDomain(...) about the definition of a wildcard - // domain. - var wildcardLabel string - var reducedName string - - if strings.Count(name, "*") > 1 { - // As mentioned above, only one wildcard character is permitted - // under RFC 6125 semantics. - return wildcardLabel, reducedName, fmt.Errorf("expected only one wildcard identifier in the given domain name") - } - - // Split the Common Name into two parts: a left-most label and the - // remaining segments (if present). - splitLabels := strings.SplitN(name, ".", 2) - if len(splitLabels) != 2 { - // We've been given a single-part domain name that consists - // entirely of a wildcard. This is a little tricky to handle, - // but EnforceHostnames validates both the wildcard-containing - // label and the reduced name, but _only_ the latter if it is - // non-empty. This allows us to still validate the only label - // component matches hostname expectations still. - wildcardLabel = splitLabels[0] - reducedName = "" - } else { - // We have a (at least) two label domain name. But before we can - // update our names, we need to validate the wildcard ended up - // in the segment we expected it to. While this is (kinda) - // validated under EnforceHostnames's leftWildLabelRegex, we - // still need to validate it in the non-enforced mode. - // - // By validated assumption above, we know there's strictly one - // wildcard in this domain so we only need to check the wildcard - // label or the reduced name (as one is equivalent to the other). 
- // Because we later assume reducedName _lacks_ wildcard segments, - // we validate that. - wildcardLabel = splitLabels[0] - reducedName = splitLabels[1] - if strings.Contains(reducedName, "*") { - return wildcardLabel, reducedName, fmt.Errorf("expected wildcard to only be present in left-most domain label") - } - } - - return wildcardLabel, reducedName, nil -} - // Given a set of requested names for a certificate, verifies that all of them // match the various toggles set in the role for controlling issuance. // If one does not pass, it is returned in the string argument. @@ -435,7 +349,21 @@ func validateNames(b *backend, data *inputBundle, names []string) string { isEmail = true } - if isWildcardDomain(reducedName) { + // Per RFC 6125 Section 6.4.3, and explicitly contradicting the earlier + // RFC 2818 which no modern client will validate against, there are two + // main types of wildcards, each with a single wildcard specifier (`*`, + // functionally different from the `*` used as a glob from the + // AllowGlobDomains parsing path) in the left-most label: + // + // 1. Entire label is a single wildcard character (most common and + // well-supported), + // 2. Part of the label contains a single wildcard character (e.g. per + /// RFC 6125: baz*.example.net, *baz.example.net, or b*z.example.net). + // + // We permit issuance of both but not the older RFC 2818 style under + // the new AllowWildcardCertificates option. However, anything with a + // glob character is technically a wildcard. + if strings.Contains(reducedName, "*") { // Regardless of later rejections below, this common name contains // a wildcard character and is thus technically a wildcard name. isWildcard = true @@ -450,12 +378,42 @@ func validateNames(b *backend, data *inputBundle, names []string) string { return name } - // Check that this domain is well-formatted per RFC 6125. 
- var err error - wildcardLabel, reducedName, err = validateWildcardDomain(reducedName) - if err != nil { + if strings.Count(reducedName, "*") > 1 { + // As mentioned above, only one wildcard character is permitted + // under RFC 6125 semantics. return name } + + // Split the Common Name into two parts: a left-most label and the + // remaining segments (if present). + splitLabels := strings.SplitN(reducedName, ".", 2) + if len(splitLabels) != 2 { + // We've been given a single-part domain name that consists + // entirely of a wildcard. This is a little tricky to handle, + // but EnforceHostnames validates both the wildcard-containing + // label and the reduced name, but _only_ the latter if it is + // non-empty. This allows us to still validate the only label + // component matches hostname expectations still. + wildcardLabel = splitLabels[0] + reducedName = "" + } else { + // We have a (at least) two label domain name. But before we can + // update our names, we need to validate the wildcard ended up + // in the segment we expected it to. While this is (kinda) + // validated under EnforceHostnames's leftWildLabelRegex, we + // still need to validate it in the non-enforced mode. + // + // By validated assumption above, we know there's strictly one + // wildcard in this domain so we only need to check the wildcard + // label or the reduced name (as one is equivalent to the other). + // Because we later assume reducedName _lacks_ wildcard segments, + // we validate that. 
+ wildcardLabel = splitLabels[0] + reducedName = splitLabels[1] + if strings.Contains(reducedName, "*") { + return name + } + } } // Email addresses using wildcard domain names do not make sense @@ -684,33 +642,6 @@ func parseOtherSANs(others []string) (map[string][]string, error) { return result, nil } -// Returns bool stating whether the given UserId is Valid -func validateUserId(data *inputBundle, userId string) bool { - allowedList := data.role.AllowedUserIDs - - if len(allowedList) == 0 { - // Nothing is allowed. - return false - } - - if strutil.StrListContainsCaseInsensitive(allowedList, userId) { - return true - } - - for _, rolePattern := range allowedList { - if rolePattern == "" { - continue - } - - if strings.Contains(rolePattern, "*") && glob.Glob(rolePattern, userId) { - return true - } - } - - // No matches. - return false -} - func validateSerialNumber(data *inputBundle, serialNumber string) string { valid := false if len(data.role.AllowedSerialNumbers) > 0 { @@ -768,15 +699,9 @@ func generateCert(sc *storageContext, // issuer entry yet, we default to the global URLs. 
entries, err := getGlobalAIAURLs(ctx, sc.Storage) if err != nil { - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)} - } - - uris, err := entries.toURLEntries(sc, issuerID("")) - if err != nil { - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse AIA URL information: %v\nUsing templated AIA URL's {{issuer_id}} field when generating root certificates is not supported.", err)} + return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)} } - - data.Params.URLs = uris + data.Params.URLs = entries if input.role.MaxPathLength == nil { data.Params.MaxPathLength = -1 @@ -1002,12 +927,6 @@ func signCert(b *backend, if isCA { creation.Params.PermittedDNSDomains = data.apiData.Get("permitted_dns_domains").([]string) - } else { - for _, ext := range csr.Extensions { - if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) { - warnings = append(warnings, "specified CSR contained a Basic Constraints extension that was ignored during issuance") - } - } } parsedBundle, err := certutil.SignCertificate(creation) @@ -1379,11 +1298,71 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn } // Get the TTL and verify it against the max allowed - notAfter, ttlWarnings, err := getCertificateNotAfter(b, data, caSign) - if err != nil { - return nil, warnings, err + var ttl time.Duration + var maxTTL time.Duration + var notAfter time.Time + var err error + { + ttl = time.Duration(data.apiData.Get("ttl").(int)) * time.Second + notAfterAlt := data.role.NotAfter + if notAfterAlt == "" { + notAfterAltRaw, ok := data.apiData.GetOk("not_after") + if ok { + notAfterAlt = notAfterAltRaw.(string) + } + + } + if ttl > 0 && notAfterAlt != "" { + return nil, nil, errutil.UserError{ + Err: "Either ttl or not_after should be provided. 
Both should not be provided in the same request.", + } + } + + if ttl == 0 && data.role.TTL > 0 { + ttl = data.role.TTL + } + + if data.role.MaxTTL > 0 { + maxTTL = data.role.MaxTTL + } + + if ttl == 0 { + ttl = b.System().DefaultLeaseTTL() + } + if maxTTL == 0 { + maxTTL = b.System().MaxLeaseTTL() + } + if ttl > maxTTL { + warnings = append(warnings, fmt.Sprintf("TTL %q is longer than permitted maxTTL %q, so maxTTL is being used", ttl, maxTTL)) + ttl = maxTTL + } + + if notAfterAlt != "" { + notAfter, err = time.Parse(time.RFC3339, notAfterAlt) + if err != nil { + return nil, nil, errutil.UserError{Err: err.Error()} + } + } else { + notAfter = time.Now().Add(ttl) + } + if caSign != nil && notAfter.After(caSign.Certificate.NotAfter) { + // If it's not self-signed, verify that the issued certificate + // won't be valid past the lifetime of the CA certificate, and + // act accordingly. This is dependent based on the issuer's + // LeafNotAfterBehavior argument. + switch caSign.LeafNotAfterBehavior { + case certutil.PermitNotAfterBehavior: + // Explicitly do nothing. + case certutil.TruncateNotAfterBehavior: + notAfter = caSign.Certificate.NotAfter + case certutil.ErrNotAfterBehavior: + fallthrough + default: + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "cannot satisfy request, as TTL would result in notAfter %s that is beyond the expiration of the CA certificate at %s", notAfter.Format(time.RFC3339Nano), caSign.Certificate.NotAfter.Format(time.RFC3339Nano))} + } + } } - warnings = append(warnings, ttlWarnings...) // Parse SKID from the request for cross-signing. var skid []byte @@ -1406,41 +1385,6 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn } } - // Add UserIDs into the Subject, if the request type supports it. - if _, present := data.apiData.Schema["user_ids"]; present { - rawUserIDs := data.apiData.Get("user_ids").([]string) - - // Only take UserIDs from CSR if one was not supplied via API. 
- if len(rawUserIDs) == 0 && csr != nil { - for _, attr := range csr.Subject.Names { - if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) { - switch aValue := attr.Value.(type) { - case string: - rawUserIDs = append(rawUserIDs, aValue) - case []byte: - rawUserIDs = append(rawUserIDs, string(aValue)) - default: - return nil, nil, errutil.UserError{Err: "unknown type for user_id attribute in CSR's Subject"} - } - } - } - } - - // Check for bad userIDs and add to the subject. - if len(rawUserIDs) > 0 { - for _, value := range rawUserIDs { - if !validateUserId(data, value) { - return nil, nil, errutil.UserError{Err: fmt.Sprintf("user_id %v is not allowed by this role", value)} - } - - subject.ExtraNames = append(subject.ExtraNames, pkix.AttributeTypeAndValue{ - Type: certutil.SubjectPilotUserIDAttributeOID, - Value: value, - }) - } - } - } - creation := &certutil.CreationBundle{ Params: &certutil.CreationParameters{ Subject: subject, @@ -1499,73 +1443,6 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn return creation, warnings, nil } -// getCertificateNotAfter compute a certificate's NotAfter date based on the mount ttl, role, signing bundle and input -// api data being sent. Returns a NotAfter time, a set of warnings or an error. -func getCertificateNotAfter(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle) (time.Time, []string, error) { - var warnings []string - var maxTTL time.Duration - var notAfter time.Time - var err error - - ttl := time.Duration(data.apiData.Get("ttl").(int)) * time.Second - notAfterAlt := data.role.NotAfter - if notAfterAlt == "" { - notAfterAltRaw, ok := data.apiData.GetOk("not_after") - if ok { - notAfterAlt = notAfterAltRaw.(string) - } - } - if ttl > 0 && notAfterAlt != "" { - return time.Time{}, warnings, errutil.UserError{Err: "Either ttl or not_after should be provided. 
Both should not be provided in the same request."} - } - - if ttl == 0 && data.role.TTL > 0 { - ttl = data.role.TTL - } - - if data.role.MaxTTL > 0 { - maxTTL = data.role.MaxTTL - } - - if ttl == 0 { - ttl = b.System().DefaultLeaseTTL() - } - if maxTTL == 0 { - maxTTL = b.System().MaxLeaseTTL() - } - if ttl > maxTTL { - warnings = append(warnings, fmt.Sprintf("TTL %q is longer than permitted maxTTL %q, so maxTTL is being used", ttl, maxTTL)) - ttl = maxTTL - } - - if notAfterAlt != "" { - notAfter, err = time.Parse(time.RFC3339, notAfterAlt) - if err != nil { - return notAfter, warnings, errutil.UserError{Err: err.Error()} - } - } else { - notAfter = time.Now().Add(ttl) - } - if caSign != nil && notAfter.After(caSign.Certificate.NotAfter) { - // If it's not self-signed, verify that the issued certificate - // won't be valid past the lifetime of the CA certificate, and - // act accordingly. This is dependent based on the issuer's - // LeafNotAfterBehavior argument. - switch caSign.LeafNotAfterBehavior { - case certutil.PermitNotAfterBehavior: - // Explicitly do nothing. - case certutil.TruncateNotAfterBehavior: - notAfter = caSign.Certificate.NotAfter - case certutil.ErrNotAfterBehavior: - fallthrough - default: - return time.Time{}, warnings, errutil.UserError{Err: fmt.Sprintf( - "cannot satisfy request, as TTL would result in notAfter of %s that is beyond the expiration of the CA certificate at %s", notAfter.UTC().Format(time.RFC3339Nano), caSign.Certificate.NotAfter.UTC().Format(time.RFC3339Nano))} - } - } - return notAfter, warnings, nil -} - func convertRespToPKCS8(resp *logical.Response) error { privRaw, ok := resp.Data["private_key"] if !ok { diff --git a/builtin/logical/pki/cert_util_test.go b/builtin/logical/pki/cert_util_test.go index 7fb811cb8fcfe..ba2e6f8106116 100644 --- a/builtin/logical/pki/cert_util_test.go +++ b/builtin/logical/pki/cert_util_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -16,8 +13,7 @@ import ( func TestPki_FetchCertBySerial(t *testing.T) { t.Parallel() - b, storage := CreateBackendWithStorage(t) - sc := b.makeStorageContext(ctx, storage) + b, storage := createBackendWithStorage(t) cases := map[string]struct { Req *logical.Request @@ -51,7 +47,7 @@ func TestPki_FetchCertBySerial(t *testing.T) { t.Fatalf("error writing to storage on %s colon-based storage path: %s", name, err) } - certEntry, err := fetchCertBySerial(sc, tc.Prefix, tc.Serial) + certEntry, err := fetchCertBySerial(context.Background(), b, tc.Req, tc.Prefix, tc.Serial) if err != nil { t.Fatalf("error on %s for colon-based storage path: %s", name, err) } @@ -86,7 +82,7 @@ func TestPki_FetchCertBySerial(t *testing.T) { t.Fatalf("error writing to storage on %s hyphen-based storage path: %s", name, err) } - certEntry, err := fetchCertBySerial(sc, tc.Prefix, tc.Serial) + certEntry, err := fetchCertBySerial(context.Background(), b, tc.Req, tc.Prefix, tc.Serial) if err != nil || certEntry == nil { t.Fatalf("error on %s for hyphen-based storage path: err: %v, entry: %v", name, err, certEntry) } diff --git a/builtin/logical/pki/chain_test.go b/builtin/logical/pki/chain_test.go index e76df359e9ed1..746a378cbaf47 100644 --- a/builtin/logical/pki/chain_test.go +++ b/builtin/logical/pki/chain_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -312,8 +309,6 @@ func (c CBValidateChain) PrettyChain(t testing.TB, chain []string, knownCerts ma } func ToCertificate(t testing.TB, cert string) *x509.Certificate { - t.Helper() - block, _ := pem.Decode([]byte(cert)) if block == nil { t.Fatalf("Unable to parse certificate: nil PEM block\n[%v]\n", cert) @@ -328,8 +323,6 @@ func ToCertificate(t testing.TB, cert string) *x509.Certificate { } func ToCRL(t testing.TB, crl string, issuer *x509.Certificate) *pkix.CertificateList { - t.Helper() - block, _ := pem.Decode([]byte(crl)) if block == nil { t.Fatalf("Unable to parse CRL: nil PEM block\n[%v]\n", crl) @@ -1605,7 +1598,7 @@ var chainBuildingTestCases = []CBTestScenario{ func Test_CAChainBuilding(t *testing.T) { t.Parallel() for testIndex, testCase := range chainBuildingTestCases { - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) knownKeys := make(map[string]string) knownCerts := make(map[string]string) @@ -1627,7 +1620,7 @@ func BenchmarkChainBuilding(benchies *testing.B) { bench.StopTimer() bench.ResetTimer() - b, s := CreateBackendWithStorage(bench) + b, s := createBackendWithStorage(bench) knownKeys := make(map[string]string) knownCerts := make(map[string]string) diff --git a/builtin/logical/pki/chain_util.go b/builtin/logical/pki/chain_util.go index e884f075588ee..6b7a6a5c7bebd 100644 --- a/builtin/logical/pki/chain_util.go +++ b/builtin/logical/pki/chain_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( diff --git a/builtin/logical/pki/cmd/pki/main.go b/builtin/logical/pki/cmd/pki/main.go index 7c804be23713d..ffcb4521c8958 100644 --- a/builtin/logical/pki/cmd/pki/main.go +++ b/builtin/logical/pki/cmd/pki/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: pki.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/pki/config_util.go b/builtin/logical/pki/config_util.go index 80814550c7531..0d8b8dad26e43 100644 --- a/builtin/logical/pki/config_util.go +++ b/builtin/logical/pki/config_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -100,21 +97,21 @@ func (sc *storageContext) changeDefaultIssuerTimestamps(oldDefault issuerID, new issuer.LastModified = now err = sc.writeIssuer(issuer) if err != nil { - return fmt.Errorf("unable to update issuer (%v)'s modification time: error persisting issuer: %w", thisId, err) + return fmt.Errorf("unable to update issuer (%v)'s modification time: error persisting issuer: %v", thisId, err) } } - // Fetch and update the internalCRLConfigEntry (3&4). + // Fetch and update the localCRLConfigEntry (3&4). 
cfg, err := sc.getLocalCRLConfig() if err != nil { - return fmt.Errorf("unable to update local CRL config's modification time: error fetching local CRL config: %w", err) + return fmt.Errorf("unable to update local CRL config's modification time: error fetching local CRL config: %v", err) } cfg.LastModified = now cfg.DeltaLastModified = now err = sc.setLocalCRLConfig(cfg) if err != nil { - return fmt.Errorf("unable to update local CRL config's modification time: error persisting local CRL config: %w", err) + return fmt.Errorf("unable to update local CRL config's modification time: error persisting local CRL config: %v", err) } return nil diff --git a/builtin/logical/pki/crl_test.go b/builtin/logical/pki/crl_test.go index aaa67ba77d805..8f15aae2f4cb3 100644 --- a/builtin/logical/pki/crl_test.go +++ b/builtin/logical/pki/crl_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -12,8 +9,6 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/logical" @@ -24,7 +19,7 @@ import ( func TestBackend_CRL_EnableDisableRoot(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ "ttl": "40h", @@ -40,7 +35,7 @@ func TestBackend_CRL_EnableDisableRoot(t *testing.T) { func TestBackend_CRLConfigUpdate(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Write a legacy config to storage. 
type legacyConfig struct { @@ -84,7 +79,7 @@ func TestBackend_CRLConfig(t *testing.T) { for _, tc := range tests { name := fmt.Sprintf("%s-%t-%t", tc.expiry, tc.disable, tc.ocspDisable) t.Run(name, func(t *testing.T) { - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ "expiry": tc.expiry, @@ -94,12 +89,9 @@ func TestBackend_CRLConfig(t *testing.T) { "auto_rebuild": tc.autoRebuild, "auto_rebuild_grace_period": tc.autoRebuildGracePeriod, }) - requireSuccessNonNilResponse(t, resp, err) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/crl"), logical.UpdateOperation), resp, true) + requireSuccessNilResponse(t, resp, err) resp, err = CBRead(b, s, "config/crl") - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/crl"), logical.ReadOperation), resp, true) - requireSuccessNonNilResponse(t, resp, err) requireFieldsSetInResp(t, resp, "disable", "expiry", "ocsp_disable", "auto_rebuild", "auto_rebuild_grace_period") @@ -131,7 +123,7 @@ func TestBackend_CRLConfig(t *testing.T) { for _, tc := range badValueTests { name := fmt.Sprintf("bad-%s-%s-%s", tc.expiry, tc.disable, tc.ocspDisable) t.Run(name, func(t *testing.T) { - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) _, err := CBWrite(b, s, "config/crl", map[string]interface{}{ "expiry": tc.expiry, @@ -172,7 +164,7 @@ func TestBackend_CRL_AllKeyTypeSigAlgos(t *testing.T) { for index, tc := range testCases { t.Logf("tv %v", index) - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ "ttl": "40h", @@ -215,7 +207,7 @@ func TestBackend_CRL_EnableDisableIntermediateWithoutRoot(t *testing.T) { } func crlEnableDisableIntermediateTestForBackend(t *testing.T, withRoot bool) { - b_root, s_root := CreateBackendWithStorage(t) + b_root, s_root := createBackendWithStorage(t) resp, err 
:= CBWrite(b_root, s_root, "root/generate/internal", map[string]interface{}{ "ttl": "40h", @@ -226,7 +218,7 @@ func crlEnableDisableIntermediateTestForBackend(t *testing.T, withRoot bool) { } rootSerial := resp.Data["serial_number"].(string) - b_int, s_int := CreateBackendWithStorage(t) + b_int, s_int := createBackendWithStorage(t) resp, err = CBWrite(b_int, s_int, "intermediate/generate/internal", map[string]interface{}{ "common_name": "intermediate myvault.com", @@ -379,7 +371,7 @@ func crlEnableDisableTestForBackend(t *testing.T, b *backend, s logical.Storage, func TestBackend_Secondary_CRL_Rebuilding(t *testing.T) { t.Parallel() ctx := context.Background() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) // Write out the issuer/key to storage without going through the api call as replication would. @@ -404,7 +396,7 @@ func TestBackend_Secondary_CRL_Rebuilding(t *testing.T) { func TestCrlRebuilder(t *testing.T) { t.Parallel() ctx := context.Background() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) // Write out the issuer/key to storage without going through the api call as replication would. @@ -412,21 +404,19 @@ func TestCrlRebuilder(t *testing.T) { _, _, err := sc.writeCaBundle(bundle, "", "") require.NoError(t, err) + req := &logical.Request{Storage: s} cb := newCRLBuilder(true /* can rebuild and write CRLs */) // Force an initial build - warnings, err := cb.rebuild(sc, true) + err = cb.rebuild(ctx, b, req, true) require.NoError(t, err, "Failed to rebuild CRL") - require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL") resp := requestCrlFromBackend(t, s, b) crl1 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte)) // We shouldn't rebuild within this call. 
- warnings, err = cb.rebuildIfForced(sc) + err = cb.rebuildIfForced(ctx, b, req) require.NoError(t, err, "Failed to rebuild if forced CRL") - require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL") - resp = requestCrlFromBackend(t, s, b) crl2 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte)) require.Equal(t, crl1.ThisUpdate, crl2.ThisUpdate, "According to the update field, we rebuilt the CRL") @@ -442,12 +432,9 @@ func TestCrlRebuilder(t *testing.T) { // This should rebuild the CRL cb.requestRebuildIfActiveNode(b) - warnings, err = cb.rebuildIfForced(sc) + err = cb.rebuildIfForced(ctx, b, req) require.NoError(t, err, "Failed to rebuild if forced CRL") - require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL") resp = requestCrlFromBackend(t, s, b) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("crl/pem"), logical.ReadOperation), resp, true) - crl3 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte)) require.True(t, crl1.ThisUpdate.Before(crl3.ThisUpdate), "initial crl time: %#v not before next crl rebuild time: %#v", crl1.ThisUpdate, crl3.ThisUpdate) @@ -456,7 +443,7 @@ func TestCrlRebuilder(t *testing.T) { func TestBYOC(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Create a root CA. resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -575,7 +562,7 @@ func TestBYOC(t *testing.T) { func TestPoP(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Create a root CA. 
resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -607,11 +594,10 @@ func TestPoP(t *testing.T) { require.NotNil(t, resp) require.NotEmpty(t, resp.Data["certificate"]) - resp, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ "certificate": resp.Data["certificate"], "private_key": resp.Data["private_key"], }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("revoke-with-key"), logical.UpdateOperation), resp, true) require.NoError(t, err) // Issue a second leaf, but hold onto it for now. @@ -736,7 +722,7 @@ func TestPoP(t *testing.T) { func TestIssuerRevocation(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Write a config with auto-rebuilding so that we can verify stuff doesn't // appear on the delta CRL. @@ -781,16 +767,12 @@ func TestIssuerRevocation(t *testing.T) { // Revoke it. resp, err = CBWrite(b, s, "issuer/root2/revoke", map[string]interface{}{}) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/root2/revoke"), logical.UpdateOperation), resp, true) - require.NoError(t, err) require.NotNil(t, resp) require.NotZero(t, resp.Data["revocation_time"]) // Regenerate the CRLs - resp, err = CBRead(b, s, "crl/rotate") - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("crl/rotate"), logical.ReadOperation), resp, true) - + _, err = CBRead(b, s, "crl/rotate") require.NoError(t, err) // Ensure the old cert isn't on its own CRL. @@ -815,7 +797,7 @@ func TestIssuerRevocation(t *testing.T) { require.NoError(t, err) // Issue a leaf cert and ensure it fails (because the issuer is revoked). 
- resp, err = CBWrite(b, s, "issuer/root2/issue/local-testing", map[string]interface{}{ + _, err = CBWrite(b, s, "issuer/root2/issue/local-testing", map[string]interface{}{ "common_name": "testing", }) require.Error(t, err) @@ -841,8 +823,6 @@ func TestIssuerRevocation(t *testing.T) { resp, err = CBWrite(b, s, "intermediate/set-signed", map[string]interface{}{ "certificate": intCert, }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("intermediate/set-signed"), logical.UpdateOperation), resp, true) - require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data["imported_issuers"]) @@ -858,8 +838,6 @@ func TestIssuerRevocation(t *testing.T) { resp, err = CBWrite(b, s, "issuer/int1/issue/local-testing", map[string]interface{}{ "common_name": "testing", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/int1/issue/local-testing"), logical.UpdateOperation), resp, true) - require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data["certificate"]) @@ -1002,9 +980,8 @@ func TestAutoRebuild(t *testing.T) { }) require.NoError(t, err) - defaultCrlPath := "/v1/pki/crl" - crl := getParsedCrlAtPath(t, client, defaultCrlPath).TBSCertList - lastCRLNumber := getCRLNumber(t, crl) + crl := getCrlCertificateList(t, client, "pki") + lastCRLNumber := crl.Version lastCRLExpiry := crl.NextUpdate requireSerialNumberInCRL(t, crl, leafSerial) @@ -1018,12 +995,6 @@ func TestAutoRebuild(t *testing.T) { }) require.NoError(t, err) - // Wait for the CRL to update based on the configuration change we just did - // so that it doesn't grab the revocation we are going to do afterwards. - crl = waitForUpdatedCrl(t, client, defaultCrlPath, lastCRLNumber, lastCRLExpiry.Sub(time.Now())) - lastCRLNumber = getCRLNumber(t, crl) - lastCRLExpiry = crl.NextUpdate - // Issue a cert and revoke it. 
resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ "common_name": "example.com", @@ -1044,7 +1015,13 @@ func TestAutoRebuild(t *testing.T) { // each revocation. Pull the storage from the cluster (via the sys/raw // endpoint which requires the mount UUID) and verify the revInfo contains // a matching issuer. - pkiMount := findStorageMountUuid(t, client, "pki") + resp, err = client.Logical().Read("sys/mounts/pki") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["uuid"]) + pkiMount := resp.Data["uuid"].(string) + require.NotEmpty(t, pkiMount) revEntryPath := "logical/" + pkiMount + "/" + revokedPath + normalizeSerial(newLeafSerial) // storage from cluster.Core[0] is a physical storage copy, not a logical @@ -1065,7 +1042,7 @@ func TestAutoRebuild(t *testing.T) { // New serial should not appear on CRL. crl = getCrlCertificateList(t, client, "pki") - thisCRLNumber := getCRLNumber(t, crl) + thisCRLNumber := crl.Version requireSerialNumberInCRL(t, crl, leafSerial) // But the old one should. now := time.Now() graceInterval, _ := time.ParseDuration(gracePeriod) @@ -1086,7 +1063,7 @@ func TestAutoRebuild(t *testing.T) { } // This serial should exist in the delta WAL section for the mount... - resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + localDeltaWALPath) + resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + deltaWALPath) require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data) @@ -1106,7 +1083,7 @@ func TestAutoRebuild(t *testing.T) { default: // Check and see if there's a storage entry for the last rebuild // serial. If so, validate the delta CRL contains this entry. 
- resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + localDeltaWALPath) + resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + deltaWALPath) require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data) @@ -1127,7 +1104,7 @@ func TestAutoRebuild(t *testing.T) { } // Read the marker and see if its correct. - resp, err = client.Logical().Read("sys/raw/logical/" + pkiMount + "/" + localDeltaWALLastBuildSerial) + resp, err = client.Logical().Read("sys/raw/logical/" + pkiMount + "/" + deltaWALLastBuildSerial) require.NoError(t, err) if resp == nil { time.Sleep(1 * time.Second) @@ -1150,13 +1127,8 @@ func TestAutoRebuild(t *testing.T) { deltaCrl := getParsedCrlAtPath(t, client, "/v1/pki/crl/delta").TBSCertList if !requireSerialNumberInCRL(nil, deltaCrl, newLeafSerial) { // Check if it is on the main CRL because its already regenerated. - mainCRL := getParsedCrlAtPath(t, client, defaultCrlPath).TBSCertList + mainCRL := getParsedCrlAtPath(t, client, "/v1/pki/crl").TBSCertList requireSerialNumberInCRL(t, mainCRL, newLeafSerial) - } else { - referenceCrlNum := getCrlReferenceFromDelta(t, deltaCrl) - if lastCRLNumber < referenceCrlNum { - lastCRLNumber = referenceCrlNum - } } } } @@ -1167,26 +1139,38 @@ func TestAutoRebuild(t *testing.T) { time.Sleep(expectedUpdate.Sub(now)) } - crl = waitForUpdatedCrl(t, client, defaultCrlPath, lastCRLNumber, lastCRLExpiry.Sub(now)+delta) - requireSerialNumberInCRL(t, crl, leafSerial) - requireSerialNumberInCRL(t, crl, newLeafSerial) -} + // Otherwise, the absolute latest we're willing to wait is some delta + // after CRL expiry (to let stuff regenerate &c). 
+ interruptChan = time.After(lastCRLExpiry.Sub(now) + delta) + for { + select { + case <-interruptChan: + t.Fatalf("expected CRL to regenerate prior to CRL expiry (plus %v grace period)", delta) + default: + crl = getCrlCertificateList(t, client, "pki") + if crl.NextUpdate.Equal(lastCRLExpiry) { + // Hack to ensure we got a net-new CRL. If we didn't, we can + // exit this default conditional and wait for the next + // go-round. When the timer fires, it'll populate the channel + // and we'll exit correctly. + time.Sleep(1 * time.Second) + break + } -func findStorageMountUuid(t *testing.T, client *api.Client, mount string) string { - resp, err := client.Logical().Read("sys/mounts/" + mount) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["uuid"]) - pkiMount := resp.Data["uuid"].(string) - require.NotEmpty(t, pkiMount) - return pkiMount + now := time.Now() + require.True(t, crl.ThisUpdate.Before(now)) + require.True(t, crl.NextUpdate.After(now)) + requireSerialNumberInCRL(t, crl, leafSerial) + requireSerialNumberInCRL(t, crl, newLeafSerial) + return + } + } } func TestTidyIssuerAssociation(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Create a root CA. resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -1334,106 +1318,3 @@ func requestCrlFromBackend(t *testing.T, s logical.Storage, b *backend) *logical require.False(t, resp.IsError(), "crl error response: %v", resp) return resp } - -func TestCRLWarningsEmptyKeyUsage(t *testing.T) { - t.Parallel() - - b, s := CreateBackendWithStorage(t) - - // Generated using OpenSSL with a configuration lacking KeyUsage on - // the CA certificate. 
- cert := `-----BEGIN CERTIFICATE----- -MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhyb290 -LW9sZDAeFw0yMDAxMDEwMTAxMDFaFw0yMTAxMDEwMTAxMDFaMBMxETAPBgNVBAMM -CHJvb3Qtb2xkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzqhSZxAL -PwFhCIPL1jFPq6jxp1wFgo6YNSfVI13gfaGIjfErxsQUbosmlEuTeOc50zXXN3kb -SDufy5Yi1OeSkFZRdJ78zdKzsEDIVR1ukUngVsSrt05gdNMJlh8XOPbcrJo78jYG -lRgtkkFSc/wCu+ue6JqkfKrbUY/G9WK0UM8ppHm1Ux67ZGoypyEgaqqxKHBRC4Yl -D+lAs1vP4C6cavqdUMKgAPTKmMBzlbpCuYPLHSzWh9Com3WQSqCbrlo3uH5RT3V9 -5Gjuk3mMUhY1l6fRL7wG3f+4x+DS+ICQNT0o4lnMxpIsiTh0cEHUFgY7G0iHWYPj -CIN8UDhpZIpoCQIDAQABo2UwYzAdBgNVHQ4EFgQUJlHk3PN7pfC22FGxAb0rWgQt -L4cwHwYDVR0jBBgwFoAUJlHk3PN7pfC22FGxAb0rWgQtL4cwDAYDVR0TBAUwAwEB -/zATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAQEAcaU0FbXb -FfXluBrjKfOzVKz+kvQ1CVv3xe3MBkS6wvqybBjJCFChnqCPxEe57BdSbBXNU5LZ -zCR/OqYas4Csv9+msSn9BI2FSMAmfMDTsp5/6iIQJqlJx9L8a7bjzVMGX6QJm/3x -S/EgGsMETAgewQXeu4jhI6StgJ2V/4Ofe498hYw4LAiBapJmkU/nHezWodNBZJ7h -LcLOzVj0Hu5MZplGBgJFgRqBCVVkqXA0q7tORuhNzYtNdJFpv3pZIhvVFFu3HUPf -wYQPhLye5WNtosz5xKe8X0Q9qp8g6azMTk+5Qe7u1d8MYAA2AIlGuKUvPHRruOmN -NC+gQnS7AK1lCw== ------END CERTIFICATE-----` - privKey := `-----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOqFJnEAs/AWEI -g8vWMU+rqPGnXAWCjpg1J9UjXeB9oYiN8SvGxBRuiyaUS5N45znTNdc3eRtIO5/L -liLU55KQVlF0nvzN0rOwQMhVHW6RSeBWxKu3TmB00wmWHxc49tysmjvyNgaVGC2S -QVJz/AK7657omqR8qttRj8b1YrRQzymkebVTHrtkajKnISBqqrEocFELhiUP6UCz -W8/gLpxq+p1QwqAA9MqYwHOVukK5g8sdLNaH0KibdZBKoJuuWje4flFPdX3kaO6T -eYxSFjWXp9EvvAbd/7jH4NL4gJA1PSjiWczGkiyJOHRwQdQWBjsbSIdZg+MIg3xQ -OGlkimgJAgMBAAECggEABKmCdmXDwy+eR0ll41aoc/hzPzHRxADAiU51Pf+DrYHj -6UPcF3db+KR2Adl0ocEhqlSoHs3CIk6KC9c+wOvagBwaaVWe4WvT9vF3M4he8rMm -dv6n2xJPFcOfDz5zUSssjk5KdOvoGRv7BzYnDIvOafvmUVwPwuo92Wizddy8saf4 -Xuea0Cupz1PELPKkbXcAqb+TzbAZrwdPj1Y7vTe/KGE4+aoDqCW/sFB1E0UsMGlt -/yfGwFP48b7kdkqSpcEQW5H8+WL3TfqRcolCD9To4vo2J+1Po0S/8qPNRvkNQDDX -AypHtrXFBOWHpJgXT4rKyH+ZGJchrCRDblt9s/sNQwKBgQD7NytvYET3pWemYiX+ 
-MB9uc6cPuMFONvlzjA9T6dbOSi/HLaeDoW027aMUZqb7QeaQCoWcUwh13dI2SZq0 -5+l9hei4JkWjoDhbWmPe7zDuQr3UMl0CSk3egz3BSHkjAhRAuUxK0QLKGB23zWxz -k8mUWYZaZRA39C6aqMt/jbJjDwKBgQDSl+eO+DjpwPzrjPSphpF4xYo4XDje9ovK -9q4KTHye7Flc3cMCX3WZBmzdt0zbqu6gWZjJH0XbWX/+SkJBGh77XWD0FeQnU7Vk -ipoeb8zTsCVxD9EytQuXti3cqBgClcCMvLKgLOJIcNYTnygojwg3t+jboQqbtV7p -VpQfAC6jZwKBgQCxJ46x1CnOmg4l/0DbqAQCV/yP0bI//fSbz0Ff459fimF3DHL9 -GHF0MtC2Kk3HEgoNud3PB58Hv43mSrGWsZSuuCgM9LBXWz1i7rNPG05eNyK26W09 -mDihmduK2hjS3zx5CDMM76gP7EHIxEyelLGqtBdS18JAMypKVo5rPPl3cQKBgQCG -ueXLImQOr4dfDntLpSqV0BLAQcekZKhEPZJURmCHr37wGXNzpixurJyjL2w9MFqf -PRKwwJAJZ3Wp8kn2qkZd23x2Szb+LeBjJQS6Kh4o44zgixTz0r1K3qLygptxs+pO -Xz4LmQte+skKHo0rfW3tb3vKXnmR6fOBZgE23//2SwKBgHck44hoE1Ex2gDEfIq1 -04OBoS1cpuc9ge4uHEmv+8uANjzwlsYf8hY1qae513MGixRBOkxcI5xX/fYPQV9F -t3Jfh8QX85JjnGntuXuraYZJMUjpwXr3QHPx0jpvAM3Au5j6qD3biC9Vrwq9Chkg -hbiiPARizZA/Tsna/9ox1qDT ------END PRIVATE KEY-----` - resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": cert + "\n" + privKey, - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Warnings) - originalWarnings := resp.Warnings - - resp, err = CBRead(b, s, "crl/rotate") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Warnings) - - // All CRL-specific warnings should've already occurred earlier on the - // import's CRL rebuild. - for _, warning := range resp.Warnings { - require.Contains(t, originalWarnings, warning) - } - - // Deleting the issuer and key should remove the warning. - _, err = CBDelete(b, s, "root") - require.NoError(t, err) - - resp, err = CBRead(b, s, "crl/rotate") - require.NoError(t, err) - require.NotNil(t, resp) - require.Empty(t, resp.Warnings) - - // Adding back just the cert shouldn't cause CRL rebuild warnings. 
- resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": cert, - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotNil(t, resp.Data["mapping"]) - require.NotEmpty(t, resp.Data["mapping"]) - require.Equal(t, len(resp.Data["mapping"].(map[string]string)), 1) - for key, value := range resp.Data["mapping"].(map[string]string) { - require.NotEmpty(t, key) - require.Empty(t, value) - } - - resp, err = CBRead(b, s, "crl/rotate") - require.NoError(t, err) - require.NotNil(t, resp) - require.Empty(t, resp.Warnings) -} diff --git a/builtin/logical/pki/crl_util.go b/builtin/logical/pki/crl_util.go index 894e427f10110..d3337e06ba439 100644 --- a/builtin/logical/pki/crl_util.go +++ b/builtin/logical/pki/crl_util.go @@ -1,10 +1,8 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( "bytes" + "context" "crypto/rand" "crypto/x509" "crypto/x509/pkix" @@ -17,24 +15,17 @@ import ( atomic2 "go.uber.org/atomic" "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) const ( - revokedPath = "revoked/" - crossRevocationPrefix = "cross-revocation-queue/" - crossRevocationPath = crossRevocationPrefix + "{{clusterId}}/" - deltaWALLastBuildSerialName = "last-build-serial" - deltaWALLastRevokedSerialName = "last-revoked-serial" - localDeltaWALPath = "delta-wal/" - localDeltaWALLastBuildSerial = localDeltaWALPath + deltaWALLastBuildSerialName - localDeltaWALLastRevokedSerial = localDeltaWALPath + deltaWALLastRevokedSerialName - unifiedDeltaWALPrefix = "unified-delta-wal/" - unifiedDeltaWALPath = "unified-delta-wal/{{clusterId}}/" - unifiedDeltaWALLastBuildSerial = unifiedDeltaWALPath + deltaWALLastBuildSerialName - unifiedDeltaWALLastRevokedSerial = unifiedDeltaWALPath + deltaWALLastRevokedSerialName + revokedPath = "revoked/" + 
deltaWALPath = "delta-wal/" + deltaWALLastBuildSerialName = "last-build-serial" + deltaWALLastBuildSerial = deltaWALPath + deltaWALLastBuildSerialName + deltaWALLastRevokedSerialName = "last-revoked-serial" + deltaWALLastRevokedSerial = deltaWALPath + deltaWALLastRevokedSerialName ) type revocationInfo struct { @@ -44,20 +35,6 @@ type revocationInfo struct { CertificateIssuer issuerID `json:"issuer_id"` } -type revocationRequest struct { - RequestedAt time.Time `json:"requested_at"` -} - -type revocationConfirmed struct { - RevokedAt string `json:"revoked_at"` - Source string `json:"source"` -} - -type revocationQueueEntry struct { - Cluster string - Serial string -} - type ( // Placeholder in case of migrations needing more data. Currently // we use the path name to store the serial number that was revoked. @@ -94,21 +71,13 @@ type crlBuilder struct { canRebuild bool lastDeltaRebuildCheck time.Time - _config sync.RWMutex - dirty *atomic2.Bool - config crlConfig - haveInitializedConfig bool + _config sync.RWMutex + dirty *atomic2.Bool + config crlConfig // Whether to invalidate our LastModifiedTime due to write on the // global issuance config. invalidate *atomic2.Bool - - // Global revocation queue entries get accepted by the invalidate func - // and passed to the crlBuilder for processing. - haveInitializedQueue *atomic2.Bool - revQueue *revocationQueue - removalQueue *revocationQueue - crossQueue *revocationQueue } const ( @@ -127,10 +96,6 @@ func newCRLBuilder(canRebuild bool) *crlBuilder { dirty: atomic2.NewBool(true), config: defaultCrlConfig, invalidate: atomic2.NewBool(false), - haveInitializedQueue: atomic2.NewBool(false), - revQueue: newRevocationQueue(), - removalQueue: newRevocationQueue(), - crossQueue: newRevocationQueue(), } } @@ -155,7 +120,6 @@ func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { return err } - previousConfig := cb.config // Set the default config if none was returned to us. 
if config != nil { cb.config = *config @@ -165,34 +129,11 @@ func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { // Updated the config; unset dirty. cb.dirty.Store(false) - triggerChangeNotification := true - if !cb.haveInitializedConfig { - cb.haveInitializedConfig = true - triggerChangeNotification = false // do not trigger on the initial loading of configuration. - } - - // Certain things need to be triggered on all server types when crlConfig is loaded. - if triggerChangeNotification { - cb.notifyOnConfigChange(sc, previousConfig, cb.config) - } } return nil } -func (cb *crlBuilder) notifyOnConfigChange(sc *storageContext, priorConfig crlConfig, newConfig crlConfig) { - // If you need to hook into a CRL configuration change across different server types - // such as primary clusters as well as performance replicas, it is easier to do here than - // in two places (API layer and in invalidateFunc) - if priorConfig.UnifiedCRL != newConfig.UnifiedCRL && newConfig.UnifiedCRL { - sc.Backend.unifiedTransferStatus.forceRun() - } - - if priorConfig.UseGlobalQueue != newConfig.UseGlobalQueue && newConfig.UseGlobalQueue { - cb.haveInitializedQueue.Store(false) - } -} - func (cb *crlBuilder) getConfigWithUpdate(sc *storageContext) (*crlConfig, error) { // Config may mutate immediately after accessing, but will be freshly // fetched if necessary. @@ -225,21 +166,21 @@ func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { // // We store a list of all (unique) CRLs in the cluster-local CRL // configuration along with their expiration dates. 
- internalCRLConfig, err := sc.getLocalCRLConfig() + crlConfig, err := sc.getLocalCRLConfig() if err != nil { - return fmt.Errorf("error checking for auto-rebuild status: unable to fetch cluster-local CRL configuration: %w", err) + return fmt.Errorf("error checking for auto-rebuild status: unable to fetch cluster-local CRL configuration: %v", err) } // If there's no config, assume we've gotta rebuild it to get this // information. - if internalCRLConfig == nil { + if crlConfig == nil { cb.forceRebuild.Store(true) return nil } // If the map is empty, assume we need to upgrade and schedule a // rebuild. - if len(internalCRLConfig.CRLExpirationMap) == 0 { + if len(crlConfig.CRLExpirationMap) == 0 { cb.forceRebuild.Store(true) return nil } @@ -255,13 +196,13 @@ func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { // error. defaultPeriod, defaultErr := time.ParseDuration(defaultCrlConfig.AutoRebuildGracePeriod) if defaultErr != nil { - return fmt.Errorf("error checking for auto-rebuild status: unable to parse duration from both config's grace period (%v) and default grace period (%v):\n- config: %v\n- default: %w\n", cfg.AutoRebuildGracePeriod, defaultCrlConfig.AutoRebuildGracePeriod, err, defaultErr) + return fmt.Errorf("error checking for auto-rebuild status: unable to parse duration from both config's grace period (%v) and default grace period (%v):\n- config: %v\n- default: %v\n", cfg.AutoRebuildGracePeriod, defaultCrlConfig.AutoRebuildGracePeriod, err, defaultErr) } period = defaultPeriod } - for _, value := range internalCRLConfig.CRLExpirationMap { + for _, value := range crlConfig.CRLExpirationMap { if value.IsZero() || now.After(value.Add(-1*period)) { cb.forceRebuild.Store(true) return nil @@ -284,7 +225,7 @@ func (cb *crlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { cfg, err := sc.getLocalCRLConfig() if err != nil { cb.invalidate.Store(true) - return fmt.Errorf("unable to update local CRL config's modification time: error 
fetching: %w", err) + return fmt.Errorf("unable to update local CRL config's modification time: error fetching: %v", err) } cfg.LastModified = time.Now().UTC() @@ -292,7 +233,7 @@ func (cb *crlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { err = sc.setLocalCRLConfig(cfg) if err != nil { cb.invalidate.Store(true) - return fmt.Errorf("unable to update local CRL config's modification time: error persisting: %w", err) + return fmt.Errorf("unable to update local CRL config's modification time: error persisting: %v", err) } } @@ -301,17 +242,17 @@ func (cb *crlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { // rebuildIfForced is to be called by readers or periodic functions that might need to trigger // a refresh of the CRL before the read occurs. -func (cb *crlBuilder) rebuildIfForced(sc *storageContext) ([]string, error) { +func (cb *crlBuilder) rebuildIfForced(ctx context.Context, b *backend, request *logical.Request) error { if cb.forceRebuild.Load() { - return cb._doRebuild(sc, true, _enforceForceFlag) + return cb._doRebuild(ctx, b, request, true, _enforceForceFlag) } - return nil, nil + return nil } // rebuild is to be called by various write apis that know the CRL is to be updated and can be now. 
-func (cb *crlBuilder) rebuild(sc *storageContext, forceNew bool) ([]string, error) { - return cb._doRebuild(sc, forceNew, _ignoreForceFlag) +func (cb *crlBuilder) rebuild(ctx context.Context, b *backend, request *logical.Request, forceNew bool) error { + return cb._doRebuild(ctx, b, request, forceNew, _ignoreForceFlag) } // requestRebuildIfActiveNode will schedule a rebuild of the CRL from the next read or write api call assuming we are the active node of a cluster @@ -329,7 +270,7 @@ func (cb *crlBuilder) requestRebuildIfActiveNode(b *backend) { cb.forceRebuild.Store(true) } -func (cb *crlBuilder) _doRebuild(sc *storageContext, forceNew bool, ignoreForceFlag bool) ([]string, error) { +func (cb *crlBuilder) _doRebuild(ctx context.Context, b *backend, request *logical.Request, forceNew bool, ignoreForceFlag bool) error { cb._builder.Lock() defer cb._builder.Unlock() // Re-read the lock in case someone beat us to the punch between the previous load op. @@ -343,17 +284,17 @@ func (cb *crlBuilder) _doRebuild(sc *storageContext, forceNew bool, ignoreForceF // if forceRebuild was requested, that should force a complete rebuild even if requested not too by forceNew myForceNew := forceBuildFlag || forceNew - return buildCRLs(sc, myForceNew) + return buildCRLs(ctx, b, request, myForceNew) } - return nil, nil + return nil } -func (cb *crlBuilder) _getPresentDeltaWALForClearing(sc *storageContext, path string) ([]string, error) { +func (cb *crlBuilder) getPresentDeltaWALForClearing(sc *storageContext) ([]string, error) { // Clearing of the delta WAL occurs after a new complete CRL has been built. 
- walSerials, err := sc.Storage.List(sc.Context, path) + walSerials, err := sc.Storage.List(sc.Context, deltaWALPath) if err != nil { - return nil, fmt.Errorf("error fetching list of delta WAL certificates to clear: %w", err) + return nil, fmt.Errorf("error fetching list of delta WAL certificates to clear: %s", err) } // We _should_ remove the special WAL entries here, but we don't really @@ -362,60 +303,23 @@ func (cb *crlBuilder) _getPresentDeltaWALForClearing(sc *storageContext, path st return walSerials, nil } -func (cb *crlBuilder) getPresentLocalDeltaWALForClearing(sc *storageContext) ([]string, error) { - return cb._getPresentDeltaWALForClearing(sc, localDeltaWALPath) -} - -func (cb *crlBuilder) getPresentUnifiedDeltaWALForClearing(sc *storageContext) ([]string, error) { - walClusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) - if err != nil { - return nil, fmt.Errorf("error fetching list of clusters with delta WAL entries: %w", err) - } - - var allPaths []string - for index, cluster := range walClusters { - prefix := unifiedDeltaWALPrefix + cluster - clusterPaths, err := cb._getPresentDeltaWALForClearing(sc, prefix) - if err != nil { - return nil, fmt.Errorf("error fetching delta WAL entries for cluster (%v / %v): %w", index, cluster, err) - } - - // Here, we don't want to include the unifiedDeltaWALPrefix because - // clearUnifiedDeltaWAL handles that for us. Instead, just include - // the cluster identifier. - for _, clusterPath := range clusterPaths { - allPaths = append(allPaths, cluster+clusterPath) - } - } - - return allPaths, nil -} - -func (cb *crlBuilder) _clearDeltaWAL(sc *storageContext, walSerials []string, path string) error { +func (cb *crlBuilder) clearDeltaWAL(sc *storageContext, walSerials []string) error { // Clearing of the delta WAL occurs after a new complete CRL has been built. for _, serial := range walSerials { // Don't remove our special entries! 
- if strings.HasSuffix(serial, deltaWALLastBuildSerialName) || strings.HasSuffix(serial, deltaWALLastRevokedSerialName) { + if serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName { continue } - if err := sc.Storage.Delete(sc.Context, path+serial); err != nil { - return fmt.Errorf("error clearing delta WAL certificate: %w", err) + if err := sc.Storage.Delete(sc.Context, deltaWALPath+serial); err != nil { + return fmt.Errorf("error clearing delta WAL certificate: %s", err) } } return nil } -func (cb *crlBuilder) clearLocalDeltaWAL(sc *storageContext, walSerials []string) error { - return cb._clearDeltaWAL(sc, walSerials, localDeltaWALPath) -} - -func (cb *crlBuilder) clearUnifiedDeltaWAL(sc *storageContext, walSerials []string) error { - return cb._clearDeltaWAL(sc, walSerials, unifiedDeltaWALPrefix) -} - -func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool) ([]string, error) { +func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool) error { // Delta CRLs use the same expiry duration as the complete CRL. Because // we always rebuild the complete CRL and then the delta CRL, we can // be assured that the delta CRL always expires after a complete CRL, @@ -427,18 +331,18 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool // within our time window for updating it. cfg, err := cb.getConfigWithUpdate(sc) if err != nil { - return nil, err + return err } if !cfg.EnableDelta { // We explicitly do not update the last check time here, as we // want to persist the last rebuild window if it hasn't been set. - return nil, nil + return nil } deltaRebuildDuration, err := time.ParseDuration(cfg.DeltaRebuildInterval) if err != nil { - return nil, err + return err } // Acquire CRL building locks before we get too much further. 
@@ -454,7 +358,7 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool // If we're still before the time of our next rebuild check, we can // safely return here even if we have certs. We'll wait for a bit, // retrigger this check, and then do the rebuild. - return nil, nil + return nil } // Update our check time. If we bail out below (due to storage errors @@ -463,39 +367,20 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool // until our next complete CRL build. cb.lastDeltaRebuildCheck = now - rebuildLocal, err := cb._shouldRebuildLocalCRLs(sc, override) - if err != nil { - return nil, fmt.Errorf("error determining if local CRLs should be rebuilt: %w", err) - } - - rebuildUnified, err := cb._shouldRebuildUnifiedCRLs(sc, override) - if err != nil { - return nil, fmt.Errorf("error determining if unified CRLs should be rebuilt: %w", err) - } - - if !rebuildLocal && !rebuildUnified { - return nil, nil - } - - // Finally, we must've needed to do the rebuild. Execute! - return cb.rebuildDeltaCRLsHoldingLock(sc, false) -} - -func (cb *crlBuilder) _shouldRebuildLocalCRLs(sc *storageContext, override bool) (bool, error) { // Fetch two storage entries to see if we actually need to do this // rebuild, given we're within the window. - lastWALEntry, err := sc.Storage.Get(sc.Context, localDeltaWALLastRevokedSerial) + lastWALEntry, err := sc.Storage.Get(sc.Context, deltaWALLastRevokedSerial) if err != nil || !override && (lastWALEntry == nil || lastWALEntry.Value == nil) { // If this entry does not exist, we don't need to rebuild the // delta WAL due to the expiration assumption above. There must // not have been any new revocations. Since err should be nil // in this case, we can safely return it. 
- return false, err + return err } - lastBuildEntry, err := sc.Storage.Get(sc.Context, localDeltaWALLastBuildSerial) + lastBuildEntry, err := sc.Storage.Get(sc.Context, deltaWALLastBuildSerial) if err != nil { - return false, err + return err } if !override && lastBuildEntry != nil && lastBuildEntry.Value != nil { @@ -508,369 +393,37 @@ func (cb *crlBuilder) _shouldRebuildLocalCRLs(sc *storageContext, override bool) // guard. var walInfo lastWALInfo if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { - return false, err + return err } var deltaInfo lastDeltaInfo if err := lastBuildEntry.DecodeJSON(&deltaInfo); err != nil { - return false, err + return err } // Here, everything decoded properly and we know that no new certs // have been revoked since we built this last delta CRL. We can exit // without rebuilding then. if walInfo.Serial == deltaInfo.Serial { - return false, nil - } - } - - return true, nil -} - -func (cb *crlBuilder) _shouldRebuildUnifiedCRLs(sc *storageContext, override bool) (bool, error) { - // Unified CRL can only be built by the main cluster. - b := sc.Backend - if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { - return false, nil - } - - // If we're overriding whether we should build Delta CRLs, always return - // true, even if storage errors might've happen. - if override { - return true, nil - } - - // Fetch two storage entries to see if we actually need to do this - // rebuild, given we're within the window. We need to fetch these - // two entries per cluster. - clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) - if err != nil { - return false, fmt.Errorf("failed to get the list of clusters having written Delta WALs: %w", err) - } - - // If any cluster tells us to rebuild, we should rebuild. 
- shouldRebuild := false - for index, cluster := range clusters { - prefix := unifiedDeltaWALPrefix + cluster - clusterUnifiedLastRevokedWALEntry := prefix + deltaWALLastRevokedSerialName - clusterUnifiedLastBuiltWALEntry := prefix + deltaWALLastBuildSerialName - - lastWALEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastRevokedWALEntry) - if err != nil { - return false, fmt.Errorf("failed fetching last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err) - } - - if lastWALEntry == nil || lastWALEntry.Value == nil { - continue - } - - lastBuildEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastBuiltWALEntry) - if err != nil { - return false, fmt.Errorf("failed fetching last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err) - } - - if lastBuildEntry == nil || lastBuildEntry.Value == nil { - // If the last build entry doesn't exist, we still want to build a - // new delta WAL, since this could be our very first time doing so. - shouldRebuild = true - break - } - - // Otherwise, here, now that we know it exists, we want to check this - // value against the other value. Since we previously guarded the WAL - // entry being non-empty, we're good to decode everything within this - // guard. - var walInfo lastWALInfo - if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { - return false, fmt.Errorf("failed decoding last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err) - } - - var deltaInfo lastDeltaInfo - if err := lastBuildEntry.DecodeJSON(&deltaInfo); err != nil { - return false, fmt.Errorf("failed decoding last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err) - } - - if walInfo.Serial != deltaInfo.Serial { - shouldRebuild = true - break + return nil } } - // No errors occurred, so return the result. - return shouldRebuild, nil + // Finally, we must've needed to do the rebuild. Execute! 
+ return cb.rebuildDeltaCRLsHoldingLock(sc, false) } -func (cb *crlBuilder) rebuildDeltaCRLs(sc *storageContext, forceNew bool) ([]string, error) { +func (cb *crlBuilder) rebuildDeltaCRLs(sc *storageContext, forceNew bool) error { cb._builder.Lock() defer cb._builder.Unlock() return cb.rebuildDeltaCRLsHoldingLock(sc, forceNew) } -func (cb *crlBuilder) rebuildDeltaCRLsHoldingLock(sc *storageContext, forceNew bool) ([]string, error) { +func (cb *crlBuilder) rebuildDeltaCRLsHoldingLock(sc *storageContext, forceNew bool) error { return buildAnyCRLs(sc, forceNew, true /* building delta */) } -func (cb *crlBuilder) addCertForRevocationCheck(cluster, serial string) { - entry := &revocationQueueEntry{ - Cluster: cluster, - Serial: serial, - } - cb.revQueue.Add(entry) -} - -func (cb *crlBuilder) addCertForRevocationRemoval(cluster, serial string) { - entry := &revocationQueueEntry{ - Cluster: cluster, - Serial: serial, - } - cb.removalQueue.Add(entry) -} - -func (cb *crlBuilder) addCertFromCrossRevocation(cluster, serial string) { - entry := &revocationQueueEntry{ - Cluster: cluster, - Serial: serial, - } - cb.crossQueue.Add(entry) -} - -func (cb *crlBuilder) maybeGatherQueueForFirstProcess(sc *storageContext, isNotPerfPrimary bool) error { - // Assume holding lock. 
- if cb.haveInitializedQueue.Load() { - return nil - } - - sc.Backend.Logger().Debug(fmt.Sprintf("gathering first time existing revocations")) - - clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix) - if err != nil { - return fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err) - } - - sc.Backend.Logger().Debug(fmt.Sprintf("found %v clusters: %v", len(clusters), clusters)) - - for cIndex, cluster := range clusters { - cluster = cluster[0 : len(cluster)-1] - cPath := crossRevocationPrefix + cluster + "/" - serials, err := sc.Storage.List(sc.Context, cPath) - if err != nil { - return fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err) - } - - sc.Backend.Logger().Debug(fmt.Sprintf("found %v serials for cluster %v: %v", len(serials), cluster, serials)) - - for _, serial := range serials { - if serial[len(serial)-1] == '/' { - serial = serial[0 : len(serial)-1] - } - - ePath := cPath + serial - eConfirmPath := ePath + "/confirmed" - removalEntry, err := sc.Storage.Get(sc.Context, eConfirmPath) - - entry := &revocationQueueEntry{ - Cluster: cluster, - Serial: serial, - } - - // No removal entry yet; add to regular queue. Otherwise, slate it - // for removal if we're a perfPrimary. - if err != nil || removalEntry == nil { - cb.revQueue.Add(entry) - } else if !isNotPerfPrimary { - cb.removalQueue.Add(entry) - } // Else, this is a confirmation but we're on a perf secondary so ignore it. - - // Overwrite the error; we don't really care about its contents - // at this step. 
- err = nil - } - } - - return nil -} - -func (cb *crlBuilder) processRevocationQueue(sc *storageContext) error { - sc.Backend.Logger().Debug(fmt.Sprintf("starting to process revocation requests")) - - isNotPerfPrimary := sc.Backend.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!sc.Backend.System().LocalMount() && sc.Backend.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) - - if err := cb.maybeGatherQueueForFirstProcess(sc, isNotPerfPrimary); err != nil { - return fmt.Errorf("failed to gather first queue: %w", err) - } - - revQueue := cb.revQueue.Iterate() - removalQueue := cb.removalQueue.Iterate() - - sc.Backend.Logger().Debug(fmt.Sprintf("gathered %v revocations and %v confirmation entries", len(revQueue), len(removalQueue))) - - crlConfig, err := cb.getConfigWithUpdate(sc) - if err != nil { - return err - } - - ourClusterId, err := sc.Backend.System().ClusterID(sc.Context) - if err != nil { - return fmt.Errorf("unable to fetch clusterID to ignore local revocation entries: %w", err) - } - - for _, req := range revQueue { - // Regardless of whether we're on the perf primary or a secondary - // cluster, we can safely ignore revocation requests originating - // from our node, because we've already checked them once (when - // they were created). - if ourClusterId != "" && ourClusterId == req.Cluster { - continue - } - - // Fetch the revocation entry to ensure it exists. - rPath := crossRevocationPrefix + req.Cluster + "/" + req.Serial - entry, err := sc.Storage.Get(sc.Context, rPath) - if err != nil { - return fmt.Errorf("failed to read cross-cluster revocation queue entry: %w", err) - } - if entry == nil { - // Skipping this entry; it was likely an incorrect invalidation - // caused by the primary cluster removing the confirmation. 
- cb.revQueue.Remove(req) - continue - } - - resp, err := tryRevokeCertBySerial(sc, crlConfig, req.Serial) - if err == nil && resp != nil && !resp.IsError() && resp.Data != nil && resp.Data["state"].(string) == "revoked" { - if isNotPerfPrimary { - // Write a revocation queue removal entry. - confirmed := revocationConfirmed{ - RevokedAt: resp.Data["revocation_time_rfc3339"].(string), - Source: req.Cluster, - } - path := crossRevocationPath + req.Serial + "/confirmed" - confirmedEntry, err := logical.StorageEntryJSON(path, confirmed) - if err != nil { - return fmt.Errorf("failed to create storage entry for cross-cluster revocation confirmed response: %w", err) - } - - if err := sc.Storage.Put(sc.Context, confirmedEntry); err != nil { - return fmt.Errorf("error persisting cross-cluster revocation confirmation: %w", err) - } - } else { - // Since we're the active node of the primary cluster, go ahead - // and just remove it. - path := crossRevocationPrefix + req.Cluster + "/" + req.Serial - if err := sc.Storage.Delete(sc.Context, path); err != nil { - return fmt.Errorf("failed to delete processed revocation request: %w", err) - } - } - } else if err != nil { - // Because we fake being from a lease, we get the guarantee that - // err == nil == resp if the cert was already revoked; this means - // this err should actually be fatal. - return err - } - cb.revQueue.Remove(req) - } - - if isNotPerfPrimary { - sc.Backend.Logger().Debug(fmt.Sprintf("not on perf primary so ignoring any revocation confirmations")) - - // See note in pki/backend.go; this should be empty. - cb.removalQueue.RemoveAll() - cb.haveInitializedQueue.Store(true) - return nil - } - - clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix) - if err != nil { - return err - } - - for _, entry := range removalQueue { - // First remove the revocation request. 
- for cIndex, cluster := range clusters { - eEntry := crossRevocationPrefix + cluster + entry.Serial - if err := sc.Storage.Delete(sc.Context, eEntry); err != nil { - return fmt.Errorf("failed to delete potential cross-cluster revocation entry for cluster %v (%v) and serial %v: %w", cluster, cIndex, entry.Serial, err) - } - } - - // Then remove the confirmation. - if err := sc.Storage.Delete(sc.Context, crossRevocationPrefix+entry.Cluster+"/"+entry.Serial+"/confirmed"); err != nil { - return fmt.Errorf("failed to delete cross-cluster revocation confirmation entry for cluster %v and serial %v: %w", entry.Cluster, entry.Serial, err) - } - - cb.removalQueue.Remove(entry) - } - - cb.haveInitializedQueue.Store(true) - - return nil -} - -func (cb *crlBuilder) processCrossClusterRevocations(sc *storageContext) error { - sc.Backend.Logger().Debug(fmt.Sprintf("starting to process unified revocations")) - - crlConfig, err := cb.getConfigWithUpdate(sc) - if err != nil { - return err - } - - if !crlConfig.UnifiedCRL { - cb.crossQueue.RemoveAll() - return nil - } - - crossQueue := cb.crossQueue.Iterate() - sc.Backend.Logger().Debug(fmt.Sprintf("gathered %v unified revocations entries", len(crossQueue))) - - ourClusterId, err := sc.Backend.System().ClusterID(sc.Context) - if err != nil { - return fmt.Errorf("unable to fetch clusterID to ignore local unified revocation entries: %w", err) - } - - for _, req := range crossQueue { - // Regardless of whether we're on the perf primary or a secondary - // cluster, we can safely ignore revocation requests originating - // from our node, because we've already checked them once (when - // they were created). - if ourClusterId != "" && ourClusterId == req.Cluster { - continue - } - - // Fetch the revocation entry to ensure it exists and this wasn't - // a delete. 
- rPath := unifiedRevocationReadPathPrefix + req.Cluster + "/" + req.Serial - entry, err := sc.Storage.Get(sc.Context, rPath) - if err != nil { - return fmt.Errorf("failed to read unified revocation entry: %w", err) - } - if entry == nil { - // Skip this entry: it was likely caused by the deletion of this - // record during tidy. - cb.crossQueue.Remove(req) - continue - } - - resp, err := tryRevokeCertBySerial(sc, crlConfig, req.Serial) - if err == nil && resp != nil && !resp.IsError() && resp.Data != nil && resp.Data["state"].(string) == "revoked" { - // We could theoretically save ourselves from writing a global - // revocation entry during the above certificate revocation, as - // we don't really need it to appear on either the unified CRL - // or its delta CRL, but this would require more plumbing. - cb.crossQueue.Remove(req) - } else if err != nil { - // Because we fake being from a lease, we get the guarantee that - // err == nil == resp if the cert was already revoked; this means - // this err should actually be fatal. - return err - } - } - - return nil -} - // Helper function to fetch a map of issuerID->parsed cert for revocation // usage. Unlike other paths, this needs to handle the legacy bundle // more gracefully than rejecting it outright. 
@@ -881,7 +434,7 @@ func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509 if !sc.Backend.useLegacyBundleCaStorage() { issuers, err = sc.listIssuers() if err != nil { - return nil, fmt.Errorf("could not fetch issuers list: %w", err) + return nil, fmt.Errorf("could not fetch issuers list: %v", err) } } else { // Hack: this isn't a real issuerID, but it works for fetchCAInfo @@ -893,7 +446,7 @@ func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509 for _, issuer := range issuers { _, bundle, caErr := sc.fetchCertBundleByIssuerId(issuer, false) if caErr != nil { - return nil, fmt.Errorf("error fetching CA certificate for issuer id %v: %w", issuer, caErr) + return nil, fmt.Errorf("error fetching CA certificate for issuer id %v: %s", issuer, caErr) } if bundle == nil { @@ -915,53 +468,23 @@ func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509 return issuerIDCertMap, nil } -// Revoke a certificate from a given serial number if it is present in local -// storage. -func tryRevokeCertBySerial(sc *storageContext, config *crlConfig, serial string) (*logical.Response, error) { - // revokeCert requires us to hold these locks before calling it. 
- sc.Backend.revokeStorageLock.Lock() - defer sc.Backend.revokeStorageLock.Unlock() - - certEntry, err := fetchCertBySerial(sc, "certs/", serial) - if err != nil { - switch err.(type) { - case errutil.UserError: - return logical.ErrorResponse(err.Error()), nil - default: - return nil, err - } - } - - if certEntry == nil { - return nil, nil - } - - cert, err := x509.ParseCertificate(certEntry.Value) - if err != nil { - return nil, fmt.Errorf("error parsing certificate: %w", err) - } - - return revokeCert(sc, config, cert) -} - // Revokes a cert, and tries to be smart about error recovery -func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) (*logical.Response, error) { +func revokeCert(ctx context.Context, b *backend, req *logical.Request, serial string, fromLease bool) (*logical.Response, error) { // As this backend is self-contained and this function does not hook into // third parties to manage users or resources, if the mount is tainted, // revocation doesn't matter anyways -- the CRL that would be written will // be immediately blown away by the view being cleared. So we can simply // fast path a successful exit. - if sc.Backend.System().Tainted() { + if b.System().Tainted() { return nil, nil } - colonSerial := serialFromCert(cert) - hyphenSerial := normalizeSerial(colonSerial) - // Validate that no issuers match the serial number to be revoked. We need // to gracefully degrade to the legacy cert bundle when it is required, as // secondary PR clusters might not have been upgraded, but still need to // handle revoking certs. + sc := b.makeStorageContext(ctx, req.Storage) + issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc) if err != nil { return nil, err @@ -970,93 +493,104 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( // Ensure we don't revoke an issuer via this API; use /issuer/:issuer_ref/revoke // instead. 
for issuer, certificate := range issuerIDCertMap { + colonSerial := strings.ReplaceAll(strings.ToLower(serial), "-", ":") if colonSerial == serialFromCert(certificate) { return logical.ErrorResponse(fmt.Sprintf("adding issuer (id: %v) to its own CRL is not allowed", issuer)), nil } } - curRevInfo, err := sc.fetchRevocationInfo(colonSerial) + alreadyRevoked := false + var revInfo revocationInfo + + revEntry, err := fetchCertBySerial(ctx, b, req, revokedPath, serial) if err != nil { - return nil, err - } - if curRevInfo != nil { - resp := &logical.Response{ - Data: map[string]interface{}{ - "revocation_time": curRevInfo.RevocationTime, - "state": "revoked", - }, + switch err.(type) { + case errutil.UserError: + return logical.ErrorResponse(err.Error()), nil + default: + return nil, err } - if !curRevInfo.RevocationTimeUTC.IsZero() { - resp.Data["revocation_time_rfc3339"] = curRevInfo.RevocationTimeUTC.Format(time.RFC3339Nano) + } + if revEntry != nil { + // Set the revocation info to the existing values + alreadyRevoked = true + err = revEntry.DecodeJSON(&revInfo) + if err != nil { + return nil, fmt.Errorf("error decoding existing revocation info") } - - return resp, nil } - // Add a little wiggle room because leases are stored with a second - // granularity - if cert.NotAfter.Before(time.Now().Add(2 * time.Second)) { - response := &logical.Response{} - response.AddWarning(fmt.Sprintf("certificate with serial %s already expired; refusing to add to CRL", colonSerial)) - return response, nil - } + if !alreadyRevoked { + certEntry, err := fetchCertBySerial(ctx, b, req, "certs/", serial) + if err != nil { + switch err.(type) { + case errutil.UserError: + return logical.ErrorResponse(err.Error()), nil + default: + return nil, err + } + } + if certEntry == nil { + if fromLease { + // We can't write to revoked/ or update the CRL anyway because we don't have the cert, + // and there's no reason to expect this will work on a subsequent + // retry. 
Just give up and let the lease get deleted. + b.Logger().Warn("expired certificate revoke failed because not found in storage, treating as success", "serial", serial) + return nil, nil + } + return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found", serial)), nil + } - currTime := time.Now() - revInfo := revocationInfo{ - CertificateBytes: cert.Raw, - RevocationTime: currTime.Unix(), - RevocationTimeUTC: currTime.UTC(), - } + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %w", err) + } + if cert == nil { + return nil, fmt.Errorf("got a nil certificate") + } - // We may not find an issuer with this certificate; that's fine so - // ignore the return value. - associateRevokedCertWithIsssuer(&revInfo, cert, issuerIDCertMap) + // Add a little wiggle room because leases are stored with a second + // granularity + if cert.NotAfter.Before(time.Now().Add(2 * time.Second)) { + response := &logical.Response{} + response.AddWarning(fmt.Sprintf("certificate with serial %s already expired; refusing to add to CRL", serial)) + return response, nil + } - revEntry, err := logical.StorageEntryJSON(revokedPath+hyphenSerial, revInfo) - if err != nil { - return nil, fmt.Errorf("error creating revocation entry: %w", err) - } + // Compatibility: Don't revoke CAs if they had leases. New CAs going + // forward aren't issued leases. + if cert.IsCA && fromLease { + return nil, nil + } - certsCounted := sc.Backend.certsCounted.Load() - err = sc.Storage.Put(sc.Context, revEntry) - if err != nil { - return nil, fmt.Errorf("error saving revoked certificate to new location: %w", err) - } - sc.Backend.ifCountEnabledIncrementTotalRevokedCertificatesCount(certsCounted, revEntry.Key) + currTime := time.Now() + revInfo.CertificateBytes = certEntry.Value + revInfo.RevocationTime = currTime.Unix() + revInfo.RevocationTimeUTC = currTime.UTC() - // From here on out, the certificate has been revoked locally. 
Any other - // persistence issues might still err, but any other failure messages - // should be added as warnings to the revocation. - resp := &logical.Response{ - Data: map[string]interface{}{ - "revocation_time": revInfo.RevocationTime, - "revocation_time_rfc3339": revInfo.RevocationTimeUTC.Format(time.RFC3339Nano), - "state": "revoked", - }, - } + // We may not find an issuer with this certificate; that's fine so + // ignore the return value. + associateRevokedCertWithIsssuer(&revInfo, cert, issuerIDCertMap) - // If this flag is enabled after the fact, existing local entries will be published to - // the unified storage space through a periodic function. - failedWritingUnifiedCRL := false - if config.UnifiedCRL { - entry := &unifiedRevocationEntry{ - SerialNumber: colonSerial, - CertExpiration: cert.NotAfter, - RevocationTimeUTC: revInfo.RevocationTimeUTC, - CertificateIssuer: revInfo.CertificateIssuer, + revEntry, err = logical.StorageEntryJSON(revokedPath+normalizeSerial(serial), revInfo) + if err != nil { + return nil, fmt.Errorf("error creating revocation entry") } - ignoreErr := writeUnifiedRevocationEntry(sc, entry) - if ignoreErr != nil { - // Just log the error if we fail to write across clusters, a separate background - // thread will reattempt it later on as we have the local write done. - sc.Backend.Logger().Error("Failed to write unified revocation entry, will re-attempt later", - "serial_number", colonSerial, "error", ignoreErr) - sc.Backend.unifiedTransferStatus.forceRun() - - resp.AddWarning(fmt.Sprintf("Failed to write unified revocation entry, will re-attempt later: %v", err)) - failedWritingUnifiedCRL = true + certsCounted := b.certsCounted.Load() + err = req.Storage.Put(ctx, revEntry) + if err != nil { + return nil, fmt.Errorf("error saving revoked certificate to new location") } + b.incrementTotalRevokedCertificatesCount(certsCounted, revEntry.Key) + } + + // Fetch the config and see if we need to rebuild the CRL. 
If we have + // auto building enabled, we will wait for the next rebuild period to + // actually rebuild it. + config, err := b.crlBuilder.getConfigWithUpdate(sc) + if err != nil { + return nil, fmt.Errorf("error building CRL: while updating config: %v", err) } if !config.AutoRebuild { @@ -1064,7 +598,7 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( // already rebuilt the full CRL so the Delta WAL will be cleared // afterwards. Writing an entry only to immediately remove it // isn't necessary. - warnings, crlErr := sc.Backend.crlBuilder.rebuild(sc, false) + crlErr := b.crlBuilder.rebuild(ctx, b, req, false) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -1072,107 +606,62 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( default: return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) } - } - for index, warning := range warnings { - resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) - } - } else if config.EnableDelta { - if err := writeRevocationDeltaWALs(sc, config, resp, failedWritingUnifiedCRL, hyphenSerial, colonSerial); err != nil { - return nil, fmt.Errorf("failed to write WAL entries for Delta CRLs: %w", err) - } - } - - return resp, nil -} - -func writeRevocationDeltaWALs(sc *storageContext, config *crlConfig, resp *logical.Response, failedWritingUnifiedCRL bool, hyphenSerial string, colonSerial string) error { - if err := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, localDeltaWALPath); err != nil { - return fmt.Errorf("failed to write local delta WAL entry: %w", err) - } - - if config.UnifiedCRL && !failedWritingUnifiedCRL { - // We only need to write cross-cluster unified Delta WAL entries when - // it is enabled; in particular, because we rebuild CRLs when enabling - // this flag, any revocations that happened prior to enabling unified - // revocation will appear on the complete CRL (+/- 
synchronization: - // in particular, if a perf replica revokes a cert prior to seeing - // unified revocation enabled, but after the main node has done the - // listing for the unified CRL rebuild, this revocation will not - // appear on either the main or the next delta CRL, but will need to - // wait for a subsequent complete CRL rebuild). - // - // Lastly, we don't attempt this if the unified CRL entry failed to - // write, as we need that entry before the delta WAL entry will make - // sense. - if ignoredErr := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, unifiedDeltaWALPath); ignoredErr != nil { - // Just log the error if we fail to write across clusters, a separate background - // thread will reattempt it later on as we have the local write done. - sc.Backend.Logger().Error("Failed to write cross-cluster delta WAL entry, will re-attempt later", - "serial_number", colonSerial, "error", ignoredErr) - sc.Backend.unifiedTransferStatus.forceRun() - - resp.AddWarning(fmt.Sprintf("Failed to write cross-cluster delta WAL entry, will re-attempt later: %v", ignoredErr)) - } - } else if failedWritingUnifiedCRL { - resp.AddWarning("Skipping cross-cluster delta WAL entry as cross-cluster revocation failed to write; will re-attempt later.") - } - - return nil -} - -func writeSpecificRevocationDeltaWALs(sc *storageContext, hyphenSerial string, colonSerial string, pathPrefix string) error { - // Previously, regardless of whether or not we've presently enabled - // Delta CRLs, we would always write the Delta WAL in case it is - // enabled in the future. We though we could trigger another full CRL - // rebuild instead (to avoid inconsistent state between the CRL and - // missing Delta WAL entries), but writing extra (unused?) WAL entries - // versus an expensive full CRL rebuild was thought of as being - // probably a net wash. - // - // However, we've now added unified CRL building, adding cross-cluster - // writes to the revocation path. 
Because this is relatively expensive, - // we've opted to rebuild the complete+delta CRLs when toggling the - // state of delta enabled, instead of always writing delta CRL entries. - // - // Thus Delta WAL building happens **only** when Delta CRLs are enabled. - // - // We should only do this when the cert hasn't already been revoked. - // Otherwise, the re-revocation may appear on both an existing CRL and - // on a delta CRL, or a serial may be skipped from the delta CRL if - // there's an A->B->A revocation pattern and the delta was rebuilt - // after the first cert. - // - // Currently we don't store any data in the WAL entry. - var walInfo deltaWALInfo - walEntry, err := logical.StorageEntryJSON(pathPrefix+hyphenSerial, walInfo) - if err != nil { - return fmt.Errorf("unable to create delta CRL WAL entry: %w", err) - } + } + } else if !alreadyRevoked { + // Regardless of whether or not we've presently enabled Delta CRLs, + // we should always write the Delta WAL in case it is enabled in the + // future. We could trigger another full CRL rebuild instead (to avoid + // inconsistent state between the CRL and missing Delta WAL entries), + // but writing extra (unused?) WAL entries versus an expensive full + // CRL rebuild is probably a net wash. + /// + // We should only do this when the cert hasn't already been revoked. + // Otherwise, the re-revocation may appear on both an existing CRL and + // on a delta CRL, or a serial may be skipped from the delta CRL if + // there's an A->B->A revocation pattern and the delta was rebuilt + // after the first cert. + // + // Currently we don't store any data in the WAL entry. 
+ var walInfo deltaWALInfo + walEntry, err := logical.StorageEntryJSON(deltaWALPath+normalizeSerial(serial), walInfo) + if err != nil { + return nil, fmt.Errorf("unable to create delta CRL WAL entry") + } - if err = sc.Storage.Put(sc.Context, walEntry); err != nil { - return fmt.Errorf("error saving delta CRL WAL entry: %w", err) + if err = req.Storage.Put(ctx, walEntry); err != nil { + return nil, fmt.Errorf("error saving delta CRL WAL entry") + } + + // In order for periodic delta rebuild to be mildly efficient, we + // should write the last revoked delta WAL entry so we know if we + // have new revocations that we should rebuild the delta WAL for. + lastRevSerial := lastWALInfo{Serial: serial} + lastWALEntry, err := logical.StorageEntryJSON(deltaWALLastRevokedSerial, lastRevSerial) + if err != nil { + return nil, fmt.Errorf("unable to create last delta CRL WAL entry") + } + if err = req.Storage.Put(ctx, lastWALEntry); err != nil { + return nil, fmt.Errorf("error saving last delta CRL WAL entry") + } } - // In order for periodic delta rebuild to be mildly efficient, we - // should write the last revoked delta WAL entry so we know if we - // have new revocations that we should rebuild the delta WAL for. 
- lastRevSerial := lastWALInfo{Serial: colonSerial} - lastWALEntry, err := logical.StorageEntryJSON(pathPrefix+deltaWALLastRevokedSerialName, lastRevSerial) - if err != nil { - return fmt.Errorf("unable to create last delta CRL WAL entry: %w", err) + resp := &logical.Response{ + Data: map[string]interface{}{ + "revocation_time": revInfo.RevocationTime, + }, } - if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil { - return fmt.Errorf("error saving last delta CRL WAL entry: %w", err) + if !revInfo.RevocationTimeUTC.IsZero() { + resp.Data["revocation_time_rfc3339"] = revInfo.RevocationTimeUTC.Format(time.RFC3339Nano) } - - return nil + return resp, nil } -func buildCRLs(sc *storageContext, forceNew bool) ([]string, error) { +func buildCRLs(ctx context.Context, b *backend, req *logical.Request, forceNew bool) error { + sc := b.makeStorageContext(ctx, req.Storage) return buildAnyCRLs(sc, forceNew, false) } -func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, error) { +func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // In order to build all CRLs, we need knowledge of all issuers. Any two // issuers with the same keys _and_ subject should have the same CRL since // they're functionally equivalent. @@ -1207,25 +696,25 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, er // buildCRL. globalCRLConfig, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) if err != nil { - return nil, fmt.Errorf("error building CRL: while updating config: %w", err) + return fmt.Errorf("error building CRL: while updating config: %v", err) } if globalCRLConfig.Disable && !forceNew { - // We build a single long-lived (but regular validity) empty CRL in - // the event that we disable the CRL, but we don't keep updating it - // with newer, more-valid empty CRLs in the event that we later - // re-enable it. This is a historical behavior. 
+ // We build a single long-lived empty CRL in the event that we disable + // the CRL, but we don't keep updating it with newer, more-valid empty + // CRLs in the event that we later re-enable it. This is a historical + // behavior. // // So, since tidy can now associate issuers on revocation entries, we // can skip the rest of this function and exit early without updating // anything. - return nil, nil + return nil } if !sc.Backend.useLegacyBundleCaStorage() { issuers, err = sc.listIssuers() if err != nil { - return nil, fmt.Errorf("error building CRL: while listing issuers: %w", err) + return fmt.Errorf("error building CRL: while listing issuers: %v", err) } } else { // Here, we hard-code the legacy issuer entry instead of using the @@ -1239,13 +728,13 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, er // Users should upgrade symmetrically, rather than attempting // backward compatibility for new features across disparate versions. if isDelta { - return []string{"refusing to rebuild delta CRL with legacy bundle; finish migrating to newer issuer storage layout"}, nil + return nil } } - issuersConfig, err := sc.getIssuersConfig() + config, err := sc.getIssuersConfig() if err != nil { - return nil, fmt.Errorf("error building CRLs: while getting the default config: %w", err) + return fmt.Errorf("error building CRLs: while getting the default config: %v", err) } // We map issuerID->entry for fast lookup and also issuerID->Cert for @@ -1262,7 +751,7 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, er // legacy path is automatically ignored. 
thisEntry, _, err := sc.fetchCertBundleByIssuerId(issuer, false) if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to fetch specified issuer (%v): %w", issuer, err) + return fmt.Errorf("error building CRLs: unable to fetch specified issuer (%v): %v", issuer, err) } if len(thisEntry.KeyID) == 0 { @@ -1287,7 +776,7 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, er thisCert, err := thisEntry.GetCertificate() if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to parse issuer (%v)'s certificate: %w", issuer, err) + return fmt.Errorf("error building CRLs: unable to parse issuer (%v)'s certificate: %v", issuer, err) } issuerIDCertMap[issuer] = thisCert @@ -1299,218 +788,11 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, er keySubjectIssuersMap[thisEntry.KeyID][subject] = append(keySubjectIssuersMap[thisEntry.KeyID][subject], issuer) } - // Now we do two calls: building the cluster-local CRL, and potentially - // building the global CRL if we're on the active node of the performance - // primary. 
- currLocalDeltaSerials, localWarnings, err := buildAnyLocalCRLs(sc, issuersConfig, globalCRLConfig, - issuers, issuerIDEntryMap, - issuerIDCertMap, keySubjectIssuersMap, - wasLegacy, forceNew, isDelta) - if err != nil { - return nil, err - } - currUnifiedDeltaSerials, unifiedWarnings, err := buildAnyUnifiedCRLs(sc, issuersConfig, globalCRLConfig, - issuers, issuerIDEntryMap, - issuerIDCertMap, keySubjectIssuersMap, - wasLegacy, forceNew, isDelta) - if err != nil { - return nil, err - } - - var warnings []string - for _, warning := range localWarnings { - warnings = append(warnings, fmt.Sprintf("warning from local CRL rebuild: %v", warning)) - } - for _, warning := range unifiedWarnings { - warnings = append(warnings, fmt.Sprintf("warning from unified CRL rebuild: %v", warning)) - } - - // Finally, we decide if we need to rebuild the Delta CRLs again, for both - // global and local CRLs if necessary. - if !isDelta { - // After we've confirmed the primary CRLs have built OK, go ahead and - // clear the delta CRL WAL and rebuild it. 
- if err := sc.Backend.crlBuilder.clearLocalDeltaWAL(sc, currLocalDeltaSerials); err != nil { - return nil, fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) - } - if err := sc.Backend.crlBuilder.clearUnifiedDeltaWAL(sc, currUnifiedDeltaSerials); err != nil { - return nil, fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) - } - deltaWarnings, err := sc.Backend.crlBuilder.rebuildDeltaCRLsHoldingLock(sc, forceNew) - if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to rebuild empty Delta WAL: %w", err) - } - for _, warning := range deltaWarnings { - warnings = append(warnings, fmt.Sprintf("warning from delta CRL rebuild: %v", warning)) - } - } - - return warnings, nil -} - -func getLastWALSerial(sc *storageContext, path string) (string, error) { - lastWALEntry, err := sc.Storage.Get(sc.Context, path) - if err != nil { - return "", err - } - - if lastWALEntry != nil && lastWALEntry.Value != nil { - var walInfo lastWALInfo - if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { - return "", err - } - - return walInfo.Serial, nil - } - - // No serial to return. - return "", nil -} - -func buildAnyLocalCRLs( - sc *storageContext, - issuersConfig *issuerConfigEntry, - globalCRLConfig *crlConfig, - issuers []issuerID, - issuerIDEntryMap map[issuerID]*issuerEntry, - issuerIDCertMap map[issuerID]*x509.Certificate, - keySubjectIssuersMap map[keyID]map[string][]issuerID, - wasLegacy bool, - forceNew bool, - isDelta bool, -) ([]string, []string, error) { - var err error - var warnings []string - - // Before we load cert entries, we want to store the last seen delta WAL - // serial number. The subsequent List will have at LEAST that certificate - // (and potentially more) in it; when we're done writing the delta CRL, - // we'll write this serial as a sentinel to see if we need to rebuild it - // in the future. 
- var lastDeltaSerial string - if isDelta { - lastDeltaSerial, err = getLastWALSerial(sc, localDeltaWALLastRevokedSerial) - if err != nil { - return nil, nil, err - } - } - - // We fetch a list of delta WAL entries prior to generating the complete - // CRL. This allows us to avoid a lock (to clear such storage): anything - // visible now, should also be visible on the complete CRL we're writing. - var currDeltaCerts []string - if !isDelta { - currDeltaCerts, err = sc.Backend.crlBuilder.getPresentLocalDeltaWALForClearing(sc) - if err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) - } - } - - var unassignedCerts []pkix.RevokedCertificate - var revokedCertsMap map[issuerID][]pkix.RevokedCertificate - - // If the CRL is disabled do not bother reading in all the revoked certificates. - if !globalCRLConfig.Disable { - // Next, we load and parse all revoked certificates. We need to assign - // these certificates to an issuer. Some certificates will not be - // assignable (if they were issued by a since-deleted issuer), so we need - // a separate pool for those. - unassignedCerts, revokedCertsMap, err = getLocalRevokedCertEntries(sc, issuerIDCertMap, isDelta) - if err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) - } - - if !isDelta { - // Revoking an issuer forces us to rebuild our complete CRL, - // regardless of whether or not we've enabled auto rebuilding or - // delta CRLs. If we elide the above isDelta check, this results - // in a non-empty delta CRL, containing the serial of the - // now-revoked issuer, even though it was generated _after_ the - // complete CRL with the issuer on it. There's no reason to - // duplicate this serial number on the delta, hence the above - // guard for isDelta. 
- if err := augmentWithRevokedIssuers(issuerIDEntryMap, issuerIDCertMap, revokedCertsMap); err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) - } - } - } - // Fetch the cluster-local CRL mapping so we know where to write the // CRLs. - internalCRLConfig, err := sc.getLocalCRLConfig() + crlConfig, err := sc.getLocalCRLConfig() if err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) - } - - rebuildWarnings, err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, - issuers, issuerIDEntryMap, keySubjectIssuersMap, - unassignedCerts, revokedCertsMap, - forceNew, false /* isUnified */, isDelta) - if err != nil { - return nil, nil, fmt.Errorf("error building CRLs: %w", err) - } - if len(rebuildWarnings) > 0 { - warnings = append(warnings, rebuildWarnings...) - } - - // Finally, persist our potentially updated local CRL config. Only do this - // if we didn't have a legacy CRL bundle. - if !wasLegacy { - if err := sc.setLocalCRLConfig(internalCRLConfig); err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) - } - } - - if isDelta { - // Update our last build time here so we avoid checking for new certs - // for a while. - sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() - - if len(lastDeltaSerial) > 0 { - // When we have a last delta serial, write out the relevant info - // so we can skip extra CRL rebuilds. 
- deltaInfo := lastDeltaInfo{Serial: lastDeltaSerial} - - lastDeltaBuildEntry, err := logical.StorageEntryJSON(localDeltaWALLastBuildSerial, deltaInfo) - if err != nil { - return nil, nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) - } - - err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) - if err != nil { - return nil, nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) - } - } - } - - return currDeltaCerts, warnings, nil -} - -func buildAnyUnifiedCRLs( - sc *storageContext, - issuersConfig *issuerConfigEntry, - globalCRLConfig *crlConfig, - issuers []issuerID, - issuerIDEntryMap map[issuerID]*issuerEntry, - issuerIDCertMap map[issuerID]*x509.Certificate, - keySubjectIssuersMap map[keyID]map[string][]issuerID, - wasLegacy bool, - forceNew bool, - isDelta bool, -) ([]string, []string, error) { - var err error - var warnings []string - - // Unified CRL can only be built by the main cluster. - b := sc.Backend - if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { - return nil, nil, nil - } - - // Unified CRL should only be built if enabled. - if !globalCRLConfig.UnifiedCRL && !forceNew { - return nil, nil, nil + return fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %v", err) } // Before we load cert entries, we want to store the last seen delta WAL @@ -1518,23 +800,20 @@ func buildAnyUnifiedCRLs( // (and potentially more) in it; when we're done writing the delta CRL, // we'll write this serial as a sentinel to see if we need to rebuild it // in the future. - // - // We need to do this per-cluster. 
- lastDeltaSerial := map[string]string{} + var lastDeltaSerial string if isDelta { - clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) + lastWALEntry, err := sc.Storage.Get(sc.Context, deltaWALLastRevokedSerial) if err != nil { - return nil, nil, fmt.Errorf("error listing clusters for unified delta WAL building: %w", err) + return err } - for index, cluster := range clusters { - path := unifiedDeltaWALPrefix + cluster + deltaWALLastRevokedSerialName - serial, err := getLastWALSerial(sc, path) - if err != nil { - return nil, nil, fmt.Errorf("error getting last written Delta WAL serial for cluster (%v / %v): %w", index, cluster, err) + if lastWALEntry != nil && lastWALEntry.Value != nil { + var walInfo lastWALInfo + if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { + return err } - lastDeltaSerial[cluster] = serial + lastDeltaSerial = walInfo.Serial } } @@ -1543,9 +822,9 @@ func buildAnyUnifiedCRLs( // visible now, should also be visible on the complete CRL we're writing. var currDeltaCerts []string if !isDelta { - currDeltaCerts, err = sc.Backend.crlBuilder.getPresentUnifiedDeltaWALForClearing(sc) + currDeltaCerts, err = sc.Backend.crlBuilder.getPresentDeltaWALForClearing(sc) if err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) + return fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %v", err) } } @@ -1558,9 +837,9 @@ func buildAnyUnifiedCRLs( // these certificates to an issuer. Some certificates will not be // assignable (if they were issued by a since-deleted issuer), so we need // a separate pool for those. 
- unassignedCerts, revokedCertsMap, err = getUnifiedRevokedCertEntries(sc, issuerIDCertMap, isDelta) + unassignedCerts, revokedCertsMap, err = getRevokedCertEntries(sc, issuerIDCertMap, isDelta) if err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) + return fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %v", err) } if !isDelta { @@ -1573,87 +852,13 @@ func buildAnyUnifiedCRLs( // duplicate this serial number on the delta, hence the above // guard for isDelta. if err := augmentWithRevokedIssuers(issuerIDEntryMap, issuerIDCertMap, revokedCertsMap); err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) - } - } - } - - // Fetch the cluster-local CRL mapping so we know where to write the - // CRLs. - internalCRLConfig, err := sc.getUnifiedCRLConfig() - if err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) - } - - rebuildWarnings, err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, - issuers, issuerIDEntryMap, keySubjectIssuersMap, - unassignedCerts, revokedCertsMap, - forceNew, true /* isUnified */, isDelta) - if err != nil { - return nil, nil, fmt.Errorf("error building CRLs: %w", err) - } - if len(rebuildWarnings) > 0 { - warnings = append(warnings, rebuildWarnings...) - } - - // Finally, persist our potentially updated local CRL config. Only do this - // if we didn't have a legacy CRL bundle. - if !wasLegacy { - if err := sc.setUnifiedCRLConfig(internalCRLConfig); err != nil { - return nil, nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) - } - } - - if isDelta { - // Update our last build time here so we avoid checking for new certs - // for a while. 
- sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() - - // Persist all of our known last revoked serial numbers here, as the - // last seen serial during build. This will allow us to detect if any - // new revocations have occurred, forcing us to rebuild the delta CRL. - for cluster, serial := range lastDeltaSerial { - if len(serial) == 0 { - continue - } - - // Make sure to use the cluster-specific path. Since we're on the - // active node of the primary cluster, we own this entry and can - // safely write it. - path := unifiedDeltaWALPrefix + cluster + deltaWALLastBuildSerialName - deltaInfo := lastDeltaInfo{Serial: serial} - lastDeltaBuildEntry, err := logical.StorageEntryJSON(path, deltaInfo) - if err != nil { - return nil, nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) - } - - err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) - if err != nil { - return nil, nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) + return fmt.Errorf("error building CRLs: unable to parse revoked issuers: %v", err) } } } - return currDeltaCerts, warnings, nil -} - -func buildAnyCRLsWithCerts( - sc *storageContext, - issuersConfig *issuerConfigEntry, - globalCRLConfig *crlConfig, - internalCRLConfig *internalCRLConfigEntry, - issuers []issuerID, - issuerIDEntryMap map[issuerID]*issuerEntry, - keySubjectIssuersMap map[keyID]map[string][]issuerID, - unassignedCerts []pkix.RevokedCertificate, - revokedCertsMap map[issuerID][]pkix.RevokedCertificate, - forceNew bool, - isUnified bool, - isDelta bool, -) ([]string, error) { // Now we can call buildCRL once, on an arbitrary/representative issuer // from each of these (keyID, subject) sets. 
- var warnings []string for _, subjectIssuersMap := range keySubjectIssuersMap { for _, issuersSet := range subjectIssuersMap { if len(issuersSet) == 0 { @@ -1679,7 +884,7 @@ func buildAnyCRLsWithCerts( // If it is, we'll also pull in the unassigned certs to remain // compatible with Vault's earlier, potentially questionable // behavior. - if issuerId == issuersConfig.DefaultIssuerId { + if issuerId == config.DefaultIssuerId { if len(unassignedCerts) > 0 { revokedCerts = append(revokedCerts, unassignedCerts...) } @@ -1699,9 +904,9 @@ func buildAnyCRLsWithCerts( } // Finally, check our crlIdentifier. - if thisCRLId, ok := internalCRLConfig.IssuerIDCRLMap[issuerId]; ok && len(thisCRLId) > 0 { + if thisCRLId, ok := crlConfig.IssuerIDCRLMap[issuerId]; ok && len(thisCRLId) > 0 { if len(crlIdentifier) > 0 && crlIdentifier != thisCRLId { - return nil, fmt.Errorf("error building CRLs: two issuers with same keys/subjects (%v vs %v) have different internal CRL IDs: %v vs %v", issuerId, crlIdIssuer, thisCRLId, crlIdentifier) + return fmt.Errorf("error building CRLs: two issuers with same keys/subjects (%v vs %v) have different internal CRL IDs: %v vs %v", issuerId, crlIdIssuer, thisCRLId, crlIdentifier) } crlIdentifier = thisCRLId @@ -1713,48 +918,30 @@ func buildAnyCRLsWithCerts( // Skip this set for the time being; while we have valid // issuers and associated keys, this occurred because we lack // crl-signing usage on all issuers in this set. - // - // But, tell the user about this, so they can either correct - // this by reissuing the CA certificate or adding an equivalent - // version with KU bits if the CA cert lacks KU altogether. 
- // - // See also: https://github.com/hashicorp/vault/issues/20137 - warning := "Issuer equivalency set with associated keys lacked an issuer with CRL Signing KeyUsage; refusing to rebuild CRL for this group of issuers: " - var issuers []string - for _, issuerId := range issuersSet { - issuers = append(issuers, issuerId.String()) - } - warning += strings.Join(issuers, ",") - - // We only need this warning once. :-) - if !isUnified && !isDelta { - warnings = append(warnings, warning) - } - continue } if len(crlIdentifier) == 0 { // Create a new random UUID for this CRL if none exists. crlIdentifier = genCRLId() - internalCRLConfig.CRLNumberMap[crlIdentifier] = 1 + crlConfig.CRLNumberMap[crlIdentifier] = 1 } // Update all issuers in this group to set the CRL Issuer for _, issuerId := range issuersSet { - internalCRLConfig.IssuerIDCRLMap[issuerId] = crlIdentifier + crlConfig.IssuerIDCRLMap[issuerId] = crlIdentifier } // We always update the CRL Number since we never want to // duplicate numbers and missing numbers is fine. - crlNumber := internalCRLConfig.CRLNumberMap[crlIdentifier] - internalCRLConfig.CRLNumberMap[crlIdentifier] += 1 + crlNumber := crlConfig.CRLNumberMap[crlIdentifier] + crlConfig.CRLNumberMap[crlIdentifier] += 1 // CRLs (regardless of complete vs delta) are incrementally // numbered. But delta CRLs need to know the number of the // last complete CRL. We assume that's the previous identifier // if no value presently exists. - lastCompleteNumber, haveLast := internalCRLConfig.LastCompleteNumberMap[crlIdentifier] + lastCompleteNumber, haveLast := crlConfig.LastCompleteNumberMap[crlIdentifier] if !haveLast { // We use the value of crlNumber for the current CRL, so // decrement it by one to find the last one. 
@@ -1763,24 +950,24 @@ func buildAnyCRLsWithCerts( // Update `LastModified` if isDelta { - internalCRLConfig.DeltaLastModified = time.Now().UTC() + crlConfig.DeltaLastModified = time.Now().UTC() } else { - internalCRLConfig.LastModified = time.Now().UTC() + crlConfig.LastModified = time.Now().UTC() } // Lastly, build the CRL. - nextUpdate, err := buildCRL(sc, globalCRLConfig, forceNew, representative, revokedCerts, crlIdentifier, crlNumber, isUnified, isDelta, lastCompleteNumber) + nextUpdate, err := buildCRL(sc, globalCRLConfig, forceNew, representative, revokedCerts, crlIdentifier, crlNumber, isDelta, lastCompleteNumber) if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to build CRL for issuer (%v): %w", representative, err) + return fmt.Errorf("error building CRLs: unable to build CRL for issuer (%v): %v", representative, err) } - internalCRLConfig.CRLExpirationMap[crlIdentifier] = *nextUpdate + crlConfig.CRLExpirationMap[crlIdentifier] = *nextUpdate if !isDelta { - internalCRLConfig.LastCompleteNumberMap[crlIdentifier] = crlNumber + crlConfig.LastCompleteNumberMap[crlIdentifier] = crlNumber } else if !haveLast { // Since we're writing this config anyways, save our guess // as to the last CRL number. - internalCRLConfig.LastCompleteNumberMap[crlIdentifier] = lastCompleteNumber + crlConfig.LastCompleteNumberMap[crlIdentifier] = lastCompleteNumber } } } @@ -1796,7 +983,7 @@ func buildAnyCRLsWithCerts( // root deletion behavior, but using soft issuer deletes. If there is an // alternate, equivalent issuer however, we'll keep updating the shared // CRL; all equivalent issuers must have their CRLs disabled. 
- for mapIssuerId := range internalCRLConfig.IssuerIDCRLMap { + for mapIssuerId := range crlConfig.IssuerIDCRLMap { stillHaveIssuer := false for _, listedIssuerId := range issuers { if mapIssuerId == listedIssuerId { @@ -1806,12 +993,12 @@ func buildAnyCRLsWithCerts( } if !stillHaveIssuer { - delete(internalCRLConfig.IssuerIDCRLMap, mapIssuerId) + delete(crlConfig.IssuerIDCRLMap, mapIssuerId) } } - for crlId := range internalCRLConfig.CRLNumberMap { + for crlId := range crlConfig.CRLNumberMap { stillHaveIssuerForID := false - for _, remainingCRL := range internalCRLConfig.IssuerIDCRLMap { + for _, remainingCRL := range crlConfig.IssuerIDCRLMap { if remainingCRL == crlId { stillHaveIssuerForID = true break @@ -1820,13 +1007,52 @@ func buildAnyCRLsWithCerts( if !stillHaveIssuerForID { if err := sc.Storage.Delete(sc.Context, "crls/"+crlId.String()); err != nil { - return nil, fmt.Errorf("error building CRLs: unable to clean up deleted issuers' CRL: %w", err) + return fmt.Errorf("error building CRLs: unable to clean up deleted issuers' CRL: %v", err) + } + } + } + + // Finally, persist our potentially updated local CRL config. Only do this + // if we didn't have a legacy CRL bundle. + if !wasLegacy { + if err := sc.setLocalCRLConfig(crlConfig); err != nil { + return fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %v", err) + } + } + + if !isDelta { + // After we've confirmed the primary CRLs have built OK, go ahead and + // clear the delta CRL WAL and rebuild it. + if err := sc.Backend.crlBuilder.clearDeltaWAL(sc, currDeltaCerts); err != nil { + return fmt.Errorf("error building CRLs: unable to clear Delta WAL: %v", err) + } + if err := sc.Backend.crlBuilder.rebuildDeltaCRLsHoldingLock(sc, forceNew); err != nil { + return fmt.Errorf("error building CRLs: unable to rebuild empty Delta WAL: %v", err) + } + } else { + // Update our last build time here so we avoid checking for new certs + // for a while. 
+ sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() + + if len(lastDeltaSerial) > 0 { + // When we have a last delta serial, write out the relevant info + // so we can skip extra CRL rebuilds. + deltaInfo := lastDeltaInfo{Serial: lastDeltaSerial} + + lastDeltaBuildEntry, err := logical.StorageEntryJSON(deltaWALLastBuildSerial, deltaInfo) + if err != nil { + return fmt.Errorf("error creating last delta CRL rebuild serial entry: %v", err) + } + + err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) + if err != nil { + return fmt.Errorf("error persisting last delta CRL rebuild info: %v", err) } } } // All good :-) - return warnings, nil + return nil } func isRevInfoIssuerValid(revInfo *revocationInfo, issuerIDCertMap map[issuerID]*x509.Certificate) bool { @@ -1854,13 +1080,13 @@ func associateRevokedCertWithIsssuer(revInfo *revocationInfo, revokedCert *x509. return false } -func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) { +func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) { var unassignedCerts []pkix.RevokedCertificate revokedCertsMap := make(map[issuerID][]pkix.RevokedCertificate) listingPath := revokedPath if isDelta { - listingPath = localDeltaWALPath + listingPath = deltaWALPath } revokedSerials, err := sc.Storage.List(sc.Context, listingPath) @@ -1970,108 +1196,12 @@ func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID // we should update the entry to make future CRL builds faster. 
revokedEntry, err = logical.StorageEntryJSON(revokedPath+serial, revInfo) if err != nil { - return nil, nil, fmt.Errorf("error creating revocation entry for existing cert: %v: %w", serial, err) + return nil, nil, fmt.Errorf("error creating revocation entry for existing cert: %v", serial) } err = sc.Storage.Put(sc.Context, revokedEntry) if err != nil { - return nil, nil, fmt.Errorf("error updating revoked certificate at existing location: %v: %w", serial, err) - } - } - } - - return unassignedCerts, revokedCertsMap, nil -} - -func getUnifiedRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) { - // Getting unified revocation entries is a bit different than getting - // the local ones. In particular, the full copy of the certificate is - // unavailable, so we'll be able to avoid parsing the stored certificate, - // at the expense of potentially having incorrect issuer mappings. - var unassignedCerts []pkix.RevokedCertificate - revokedCertsMap := make(map[issuerID][]pkix.RevokedCertificate) - - listingPath := unifiedRevocationReadPathPrefix - if isDelta { - listingPath = unifiedDeltaWALPrefix - } - - // First, we find all clusters that have written certificates. - clusterIds, err := sc.Storage.List(sc.Context, listingPath) - if err != nil { - return nil, nil, fmt.Errorf("failed to list clusters for unified CRL building: %w", err) - } - - // We wish to prevent duplicate revocations on separate clusters from - // being added multiple times to the CRL. While we can't guarantee these - // are the same certificate, it doesn't matter as (as long as they have - // the same issuer), it'd imply issuance of two certs with the same - // serial which'd be an intentional violation of RFC 5280 before importing - // an issuer into Vault, and would be highly unlikely within Vault, due - // to 120-bit random serial numbers. 
- foundSerials := make(map[string]bool) - - // Then for every cluster, we find its revoked certificates... - for _, clusterId := range clusterIds { - if !strings.HasSuffix(clusterId, "/") { - // No entries - continue - } - - clusterPath := listingPath + clusterId - serials, err := sc.Storage.List(sc.Context, clusterPath) - if err != nil { - return nil, nil, fmt.Errorf("failed to list serials in cluster (%v) for unified CRL building: %w", clusterId, err) - } - - // At this point, we need the storage entry. Rather than using the - // clusterPath and adding the serial, we need to use the true - // cross-cluster revocation entry (as, our above listing might have - // used delta WAL entires without the full revocation info). - serialPrefix := unifiedRevocationReadPathPrefix + clusterId - for _, serial := range serials { - if isDelta && (serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName) { - // Skip our placeholder entries... - continue - } - - serialPath := serialPrefix + serial - entryRaw, err := sc.Storage.Get(sc.Context, serialPath) - if err != nil { - return nil, nil, fmt.Errorf("failed to read unified revocation entry in cluster (%v) for unified CRL building: %w", clusterId, err) - } - if entryRaw == nil { - // Skip empty entries. We'll eventually tidy them. - continue - } - - var xRevEntry unifiedRevocationEntry - if err := entryRaw.DecodeJSON(&xRevEntry); err != nil { - return nil, nil, fmt.Errorf("failed json decoding of unified revocation entry at path %v: %w ", serialPath, err) - } - - // Convert to pkix.RevokedCertificate entries. - var revEntry pkix.RevokedCertificate - var ok bool - revEntry.SerialNumber, ok = serialToBigInt(serial) - if !ok { - return nil, nil, fmt.Errorf("failed to encode serial for CRL building: %v", serial) - } - - revEntry.RevocationTime = xRevEntry.RevocationTimeUTC - - if found, inFoundMap := foundSerials[normalizeSerial(serial)]; found && inFoundMap { - // Serial has already been added to the CRL. 
- continue - } - foundSerials[normalizeSerial(serial)] = true - - // Finally, add it to the correct mapping. - _, present := issuerIDCertMap[xRevEntry.CertificateIssuer] - if !present { - unassignedCerts = append(unassignedCerts, revEntry) - } else { - revokedCertsMap[xRevEntry.CertificateIssuer] = append(revokedCertsMap[xRevEntry.CertificateIssuer], revEntry) + return nil, nil, fmt.Errorf("error updating revoked certificate at existing location: %v", serial) } } } @@ -2115,7 +1245,7 @@ func augmentWithRevokedIssuers(issuerIDEntryMap map[issuerID]*issuerEntry, issue // Builds a CRL by going through the list of revoked certificates and building // a new CRL with the stored revocation times and serial numbers. -func buildCRL(sc *storageContext, crlInfo *crlConfig, forceNew bool, thisIssuerId issuerID, revoked []pkix.RevokedCertificate, identifier crlID, crlNumber int64, isUnified bool, isDelta bool, lastCompleteNumber int64) (*time.Time, error) { +func buildCRL(sc *storageContext, crlInfo *crlConfig, forceNew bool, thisIssuerId issuerID, revoked []pkix.RevokedCertificate, identifier crlID, crlNumber int64, isDelta bool, lastCompleteNumber int64) (*time.Time, error) { var revokedCerts []pkix.RevokedCertificate crlLifetime, err := time.ParseDuration(crlInfo.Expiry) @@ -2159,7 +1289,7 @@ WRITE: if isDelta { ext, err := certutil.CreateDeltaCRLIndicatorExt(lastCompleteNumber) if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %w", err) + return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) } extensions = []pkix.Extension{ext} } @@ -2183,15 +1313,9 @@ WRITE: // Ignore the CRL ID as it won't be persisted anyways; hard-code the // old legacy path and allow it to be updated. writePath = legacyCRLPath - } else { - if isUnified { - writePath = unifiedCRLPathPrefix + writePath - } - - if isDelta { - // Write the delta CRL to a unique storage location. 
- writePath += deltaCRLPathSuffix - } + } else if isDelta { + // Write the delta CRL to a unique storage location. + writePath += deltaCRLPathSuffix } err = sc.Storage.Put(sc.Context, &logical.StorageEntry{ @@ -2204,9 +1328,3 @@ WRITE: return &nextUpdate, nil } - -// shouldLocalPathsUseUnified assuming a legacy path for a CRL/OCSP request, does our -// configuration say we should be returning the unified response or not -func shouldLocalPathsUseUnified(cfg *crlConfig) bool { - return cfg.UnifiedCRL && cfg.UnifiedCRLOnExistingPaths -} diff --git a/builtin/logical/pki/dnstest/server.go b/builtin/logical/pki/dnstest/server.go deleted file mode 100644 index 87091ed5eb60e..0000000000000 --- a/builtin/logical/pki/dnstest/server.go +++ /dev/null @@ -1,415 +0,0 @@ -package dnstest - -import ( - "context" - "fmt" - "net" - "strings" - "sync" - "testing" - "time" - - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/hashicorp/vault/sdk/helper/docker" - "github.com/stretchr/testify/require" -) - -type TestServer struct { - t *testing.T - ctx context.Context - - runner *docker.Runner - network string - startup *docker.Service - - lock sync.Mutex - serial int - forwarders []string - domains []string - records map[string]map[string][]string // domain -> record -> value(s). 
- - cleanup func() -} - -func SetupResolver(t *testing.T, domain string) *TestServer { - return SetupResolverOnNetwork(t, domain, "") -} - -func SetupResolverOnNetwork(t *testing.T, domain string, network string) *TestServer { - var ts TestServer - ts.t = t - ts.ctx = context.Background() - ts.domains = []string{domain} - ts.records = map[string]map[string][]string{} - ts.network = network - - ts.setupRunner(domain, network) - ts.startContainer(network) - ts.PushConfig() - - return &ts -} - -func (ts *TestServer) setupRunner(domain string, network string) { - var err error - ts.runner, err = docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "ubuntu/bind9", - ImageTag: "latest", - ContainerName: "bind9-dns-" + strings.ReplaceAll(domain, ".", "-"), - NetworkName: network, - Ports: []string{"53/udp"}, - LogConsumer: func(s string) { - ts.t.Logf(s) - }, - }) - require.NoError(ts.t, err) -} - -func (ts *TestServer) startContainer(network string) { - connUpFunc := func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { - // Perform a simple connection to this resolver, even though the - // default configuration doesn't do anything useful. - peer, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", host, port)) - if err != nil { - return nil, fmt.Errorf("failed to resolve peer: %v / %v: %w", host, port, err) - } - - conn, err := net.DialUDP("udp", nil, peer) - if err != nil { - return nil, fmt.Errorf("failed to dial peer: %v / %v / %v: %w", host, port, peer, err) - } - defer conn.Close() - - _, err = conn.Write([]byte("garbage-in")) - if err != nil { - return nil, fmt.Errorf("failed to write to peer: %v / %v / %v: %w", host, port, peer, err) - } - - // Connection worked. 
- return docker.NewServiceHostPort(host, port), nil - } - - result, _, err := ts.runner.StartNewService(ts.ctx, true, true, connUpFunc) - require.NoError(ts.t, err, "failed to start dns resolver for "+ts.domains[0]) - ts.startup = result - - if ts.startup.StartResult.RealIP == "" { - mapping, err := ts.runner.GetNetworkAndAddresses(ts.startup.Container.ID) - require.NoError(ts.t, err, "failed to fetch network addresses to correct missing real IP address") - if len(network) == 0 { - require.Equal(ts.t, 1, len(mapping), "expected exactly one network address") - for network = range mapping { - // Because mapping is a map of network name->ip, we need - // to use the above range's assignment to get the name, - // as there is no other way of getting the keys of a map. - } - } - require.Contains(ts.t, mapping, network, "expected network to be part of the mapping") - ts.startup.StartResult.RealIP = mapping[network] - } - - ts.t.Logf("[dnsserv] Addresses of DNS resolver: local=%v / container=%v", ts.GetLocalAddr(), ts.GetRemoteAddr()) -} - -func (ts *TestServer) buildNamedConf() string { - forwarders := "\n" - if len(ts.forwarders) > 0 { - forwarders = "\tforwarders {\n" - for _, forwarder := range ts.forwarders { - forwarders += "\t\t" + forwarder + ";\n" - } - forwarders += "\t};\n" - } - - zones := "\n" - for _, domain := range ts.domains { - zones += fmt.Sprintf("zone \"%s\" {\n", domain) - zones += "\ttype primary;\n" - zones += fmt.Sprintf("\tfile \"%s.zone\";\n", domain) - zones += "\tallow-update {\n\t\tnone;\n\t};\n" - zones += "\tnotify no;\n" - zones += "};\n\n" - } - - // Reverse lookups are not handles as they're not presently necessary. - - cfg := `options { - directory "/var/cache/bind"; - - dnssec-validation no; - - ` + forwarders + ` -}; - -` + zones - - return cfg -} - -func (ts *TestServer) buildZoneFile(target string) string { - // One second TTL by default to allow quick refreshes. 
- zone := "$TTL 1;\n" - - ts.serial += 1 - zone += fmt.Sprintf("@\tIN\tSOA\tns.%v.\troot.%v.\t(\n", target, target) - zone += fmt.Sprintf("\t\t\t%d;\n\t\t\t1;\n\t\t\t1;\n\t\t\t2;\n\t\t\t1;\n\t\t\t)\n\n", ts.serial) - zone += fmt.Sprintf("@\tIN\tNS\tns%d.%v.\n", ts.serial, target) - zone += fmt.Sprintf("ns%d.%v.\tIN\tA\t%v\n", ts.serial, target, "127.0.0.1") - - for domain, records := range ts.records { - if !strings.HasSuffix(domain, target) { - continue - } - - for recordType, values := range records { - for _, value := range values { - zone += fmt.Sprintf("%s.\tIN\t%s\t%s\n", domain, recordType, value) - } - } - } - - return zone -} - -func (ts *TestServer) pushNamedConf() { - contents := docker.NewBuildContext() - cfgPath := "/etc/bind/named.conf.options" - namedCfg := ts.buildNamedConf() - contents[cfgPath] = docker.PathContentsFromString(namedCfg) - contents[cfgPath].SetOwners(0, 142) // root, bind - - ts.t.Logf("Generated bind9 config (%s):\n%v\n", cfgPath, namedCfg) - - err := ts.runner.CopyTo(ts.startup.Container.ID, "/", contents) - require.NoError(ts.t, err, "failed pushing updated named.conf.options to container") -} - -func (ts *TestServer) pushZoneFiles() { - contents := docker.NewBuildContext() - - for _, domain := range ts.domains { - path := "/var/cache/bind/" + domain + ".zone" - zoneFile := ts.buildZoneFile(domain) - contents[path] = docker.PathContentsFromString(zoneFile) - contents[path].SetOwners(0, 142) // root, bind - - ts.t.Logf("Generated bind9 zone file for %v (%s):\n%v\n", domain, path, zoneFile) - } - - err := ts.runner.CopyTo(ts.startup.Container.ID, "/", contents) - require.NoError(ts.t, err, "failed pushing updated named.conf.options to container") -} - -func (ts *TestServer) PushConfig() { - ts.lock.Lock() - defer ts.lock.Unlock() - - // There's two cases here: - // - // 1. We've added a new top-level domain name. Here, we want to make - // sure the new zone file is pushed before we push the reference - // to it. - // 2. 
We've just added a new. Here, the order doesn't matter, but - // mostly likely the second push will be a no-op. - ts.pushZoneFiles() - ts.pushNamedConf() - - // Wait until our config has taken. - corehelpers.RetryUntil(ts.t, 15*time.Second, func() error { - // bind reloads based on file mtime, touch files before starting - // to make sure it has been updated more recently than when the - // last update was written. Then issue a new SIGHUP. - for _, domain := range ts.domains { - path := "/var/cache/bind/" + domain + ".zone" - touchCmd := []string{"touch", path} - - _, _, _, err := ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, touchCmd) - if err != nil { - return fmt.Errorf("failed to update zone mtime: %w", err) - } - } - ts.runner.DockerAPI.ContainerKill(ts.ctx, ts.startup.Container.ID, "SIGHUP") - - // Connect to our bind resolver. - resolver := &net.Resolver{ - PreferGo: true, - StrictErrors: false, - Dial: func(ctx context.Context, network, address string) (net.Conn, error) { - d := net.Dialer{ - Timeout: 10 * time.Second, - } - return d.DialContext(ctx, network, ts.GetLocalAddr()) - }, - } - - // last domain has the given serial number, which also appears in the - // NS record so we can fetch it via Go. 
- lastDomain := ts.domains[len(ts.domains)-1] - records, err := resolver.LookupNS(ts.ctx, lastDomain) - if err != nil { - return fmt.Errorf("failed to lookup NS record for %v: %w", lastDomain, err) - } - - if len(records) != 1 { - return fmt.Errorf("expected only 1 NS record for %v, got %v/%v", lastDomain, len(records), records) - } - - expectedNS := fmt.Sprintf("ns%d.%v.", ts.serial, lastDomain) - if records[0].Host != expectedNS { - return fmt.Errorf("expected to find NS %v, got %v indicating reload hadn't completed", expectedNS, records[0]) - } - - return nil - }) -} - -func (ts *TestServer) GetLocalAddr() string { - return ts.startup.Config.Address() -} - -func (ts *TestServer) GetRemoteAddr() string { - return fmt.Sprintf("%s:%d", ts.startup.StartResult.RealIP, 53) -} - -func (ts *TestServer) AddDomain(domain string) { - ts.lock.Lock() - defer ts.lock.Unlock() - - for _, existing := range ts.domains { - if existing == domain { - return - } - } - - ts.domains = append(ts.domains, domain) -} - -func (ts *TestServer) AddRecord(domain string, record string, value string) { - ts.lock.Lock() - defer ts.lock.Unlock() - - foundDomain := false - for _, existing := range ts.domains { - if strings.HasSuffix(domain, existing) { - foundDomain = true - break - } - } - if !foundDomain { - ts.t.Fatalf("cannot add record %v/%v :: [%v] -- no domain zone matching (%v)", record, domain, value, ts.domains) - } - - value = strings.TrimSpace(value) - if _, present := ts.records[domain]; !present { - ts.records[domain] = map[string][]string{} - } - - if values, present := ts.records[domain][record]; present { - for _, candidate := range values { - if candidate == value { - // Already present; skip adding. 
- return - } - } - } - - ts.records[domain][record] = append(ts.records[domain][record], value) -} - -func (ts *TestServer) RemoveRecord(domain string, record string, value string) { - ts.lock.Lock() - defer ts.lock.Unlock() - - foundDomain := false - for _, existing := range ts.domains { - if strings.HasSuffix(domain, existing) { - foundDomain = true - break - } - } - if !foundDomain { - // Not found. - return - } - - value = strings.TrimSpace(value) - if _, present := ts.records[domain]; !present { - // Not found. - return - } - - var remaining []string - if values, present := ts.records[domain][record]; present { - for _, candidate := range values { - if candidate != value { - remaining = append(remaining, candidate) - } - } - } - - ts.records[domain][record] = remaining -} - -func (ts *TestServer) RemoveRecordsOfTypeForDomain(domain string, record string) { - ts.lock.Lock() - defer ts.lock.Unlock() - - foundDomain := false - for _, existing := range ts.domains { - if strings.HasSuffix(domain, existing) { - foundDomain = true - break - } - } - if !foundDomain { - // Not found. - return - } - - if _, present := ts.records[domain]; !present { - // Not found. - return - } - - delete(ts.records[domain], record) -} - -func (ts *TestServer) RemoveRecordsForDomain(domain string) { - ts.lock.Lock() - defer ts.lock.Unlock() - - foundDomain := false - for _, existing := range ts.domains { - if strings.HasSuffix(domain, existing) { - foundDomain = true - break - } - } - if !foundDomain { - // Not found. - return - } - - if _, present := ts.records[domain]; !present { - // Not found. 
- return - } - - ts.records[domain] = map[string][]string{} -} - -func (ts *TestServer) RemoveAllRecords() { - ts.lock.Lock() - defer ts.lock.Unlock() - - ts.records = map[string]map[string][]string{} -} - -func (ts *TestServer) Cleanup() { - if ts.cleanup != nil { - ts.cleanup() - } - if ts.startup != nil && ts.startup.Cleanup != nil { - ts.startup.Cleanup() - } -} diff --git a/builtin/logical/pki/fields.go b/builtin/logical/pki/fields.go index b29353985e216..a509cd0090db9 100644 --- a/builtin/logical/pki/fields.go +++ b/builtin/logical/pki/fields.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -156,16 +153,6 @@ The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ`, of the ca_chain field.`, } - fields["user_ids"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `The requested user_ids value to place in the subject, -if any, in a comma-delimited list. Restricted by allowed_user_ids. -Any values are added with OID 0.9.2342.19200300.100.1.1.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "User ID(s)", - }, - } - fields = addIssuerRefField(fields) return fields @@ -468,47 +455,6 @@ on revocation entries. This helps increase the performance of CRL building and OCSP responses.`, } - fields["tidy_expired_issuers"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to automatically remove expired issuers -past the issuer_safety_buffer. No keys will be removed as part of this -operation.`, - } - - fields["tidy_move_legacy_ca_bundle"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to move the legacy ca_bundle from -/config/ca_bundle to /config/ca_bundle.bak. This prevents downgrades -to pre-Vault 1.11 versions (as older PKI engines do not know about -the new multi-issuer storage layout), but improves the performance -on seal wrapped PKI mounts. 
This will only occur if at least -issuer_safety_buffer time has occurred after the initial storage -migration. - -This backup is saved in case of an issue in future migrations. -Operators may consider removing it via sys/raw if they desire. -The backup will be removed via a DELETE /root call, but note that -this removes ALL issuers within the mount (and is thus not desirable -in most operational scenarios).`, - } - - fields["tidy_acme"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to enable tidying ACME accounts, -orders and authorizations. ACME orders are tidied (deleted) -safety_buffer after the certificate associated with them expires, -or after the order and relevant authorizations have expired if no -certificate was produced. Authorizations are tidied with the -corresponding order. - -When a valid ACME Account is at least acme_account_safety_buffer -old, and has no remaining orders associated with it, the account is -marked as revoked. After another acme_account_safety_buffer has -passed from the revocation or deactivation date, a revoked or -deactivated ACME account is deleted.`, - Default: false, - } - fields["safety_buffer"] = &framework.FieldSchema{ Type: framework.TypeDurationSecond, Description: `The amount of extra time that must have passed @@ -518,23 +464,6 @@ Defaults to 72 hours.`, Default: int(defaultTidyConfig.SafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int } - fields["issuer_safety_buffer"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The amount of extra time that must have passed -beyond issuer's expiration before it is removed -from the backend storage. 
-Defaults to 8760 hours (1 year).`, - Default: int(defaultTidyConfig.IssuerSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int - } - - fields["acme_account_safety_buffer"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The amount of time that must pass after creation -that an account with no orders is marked revoked, and the amount of time -after being marked revoked or deactivated.`, - Default: int(defaultTidyConfig.AcmeAccountSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int - } - fields["pause_duration"] = &framework.FieldSchema{ Type: framework.TypeString, Description: `The amount of time to wait between processing @@ -547,99 +476,5 @@ greater period of time. By default this is zero seconds.`, Default: "0s", } - fields["tidy_revocation_queue"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to remove stale revocation queue entries -that haven't been confirmed by any active cluster. Only runs on the -active primary node`, - Default: defaultTidyConfig.RevocationQueue, - } - - fields["revocation_queue_safety_buffer"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The amount of time that must pass from the -cross-cluster revocation request being initiated to when it will be -slated for removal. Setting this too low may remove valid revocation -requests before the owning cluster has a chance to process them, -especially if the cluster is offline.`, - Default: int(defaultTidyConfig.QueueSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int - } - - fields["tidy_cross_cluster_revoked_certs"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to enable tidying up -the cross-cluster revoked certificate store. 
Only runs on the active -primary node.`, - } - - return fields -} - -// generate the entire list of schema fields we need for CSR sign verbatim, this is also -// leveraged by ACME internally. -func getCsrSignVerbatimSchemaFields() map[string]*framework.FieldSchema { - fields := map[string]*framework.FieldSchema{} - fields = addNonCACommonFields(fields) - fields = addSignVerbatimRoleFields(fields) - - fields["csr"] = &framework.FieldSchema{ - Type: framework.TypeString, - Default: "", - Description: `PEM-format CSR to be signed. Values will be -taken verbatim from the CSR, except for -basic constraints.`, - } - - return fields -} - -// addSignVerbatimRoleFields provides the fields and defaults to be used by anything that is building up the fields -// and their corresponding default values when generating/using a sign-verbatim type role such as buildSignVerbatimRole. -func addSignVerbatimRoleFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["key_usage"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Default: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, - Description: `A comma-separated string or list of key usages (not extended -key usages). Valid values can be found at -https://golang.org/pkg/crypto/x509/#KeyUsage --- simply drop the "KeyUsage" part of the name. -To remove all key usages from being set, set -this value to an empty list.`, - } - - fields["ext_key_usage"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Default: []string{}, - Description: `A comma-separated string or list of extended key usages. Valid values can be found at -https://golang.org/pkg/crypto/x509/#ExtKeyUsage --- simply drop the "ExtKeyUsage" part of the name. 
-To remove all key usages from being set, set -this value to an empty list.`, - } - - fields["ext_key_usage_oids"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `A comma-separated string or list of extended key usage oids.`, - } - - fields["signature_bits"] = &framework.FieldSchema{ - Type: framework.TypeInt, - Default: 0, - Description: `The number of bits to use in the signature -algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for -SHA-2-512. Defaults to 0 to automatically detect based on key length -(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: 0, - }, - } - - fields["use_pss"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. Defaults to false.`, - } - return fields } diff --git a/builtin/logical/pki/integation_test.go b/builtin/logical/pki/integation_test.go new file mode 100644 index 0000000000000..d71909981c6a1 --- /dev/null +++ b/builtin/logical/pki/integation_test.go @@ -0,0 +1,492 @@ +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "testing" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +func TestIntegration_RotateRootUsesNext(t *testing.T) { + t.Parallel() + b, s := createBackendWithStorage(t) + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/rotate/internal", + Storage: s, + Data: map[string]interface{}{ + "common_name": "test.com", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed rotate root") + require.NotNil(t, resp, "got nil response from rotate root") + require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) + + issuerId1 := resp.Data["issuer_id"].(issuerID) + 
issuerName1 := resp.Data["issuer_name"] + + require.NotEmpty(t, issuerId1, "issuer id was empty on initial rotate root command") + require.Equal(t, "next", issuerName1, "expected an issuer name of next on initial rotate root command") + + // Call it again, we should get a new issuer id, but since next issuer_name is used we should get a blank value. + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/rotate/internal", + Storage: s, + Data: map[string]interface{}{ + "common_name": "test.com", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed rotate root") + require.NotNil(t, resp, "got nil response from rotate root") + require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) + + issuerId2 := resp.Data["issuer_id"].(issuerID) + issuerName2 := resp.Data["issuer_name"] + + require.NotEmpty(t, issuerId2, "issuer id was empty on second rotate root command") + require.NotEqual(t, issuerId1, issuerId2, "should have been different issuer ids") + require.Empty(t, issuerName2, "expected a blank issuer name on the second rotate root command") + + // Call it again, making sure we can use our own name if desired. 
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/rotate/internal", + Storage: s, + Data: map[string]interface{}{ + "common_name": "test.com", + "issuer_name": "next-cert", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed rotate root") + require.NotNil(t, resp, "got nil response from rotate root") + require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) + + issuerId3 := resp.Data["issuer_id"].(issuerID) + issuerName3 := resp.Data["issuer_name"] + + require.NotEmpty(t, issuerId3, "issuer id was empty on third rotate root command") + require.NotEqual(t, issuerId3, issuerId1, "should have been different issuer id from initial") + require.NotEqual(t, issuerId3, issuerId2, "should have been different issuer id from second call") + require.Equal(t, "next-cert", issuerName3, "expected an issuer name that we specified on third rotate root command") +} + +func TestIntegration_ReplaceRootNormal(t *testing.T) { + t.Parallel() + b, s := createBackendWithStorage(t) + + // generate roots + genTestRootCa(t, b, s) + issuerId2, _ := genTestRootCa(t, b, s) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{ + "default": issuerId2.String(), + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root") + require.NotNil(t, resp, "got nil response from replacing root") + require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) + + replacedIssuer := resp.Data["default"] + require.Equal(t, issuerId2, replacedIssuer, "expected return value to match issuer we set") + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/issuers", + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root") + require.NotNil(t, resp, 
"got nil response from replacing root") + require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) + + defaultIssuer := resp.Data["default"] + require.Equal(t, issuerId2, defaultIssuer, "expected default issuer to be updated") +} + +func TestIntegration_ReplaceRootDefaultsToNext(t *testing.T) { + t.Parallel() + b, s := createBackendWithStorage(t) + + // generate roots + genTestRootCa(t, b, s) + issuerId2, _ := genTestRootCaWithIssuerName(t, b, s, "next") + + // Do not specify the default value to replace. + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{}, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root") + require.NotNil(t, resp, "got nil response from replacing root") + require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) + + replacedIssuer := resp.Data["default"] + require.Equal(t, issuerId2, replacedIssuer, "expected return value to match the 'next' issuer we set") + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/issuers", + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root") + require.NotNil(t, resp, "got nil response from replacing root") + require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) + + defaultIssuer := resp.Data["default"] + require.Equal(t, issuerId2, defaultIssuer, "expected default issuer to be updated") +} + +func TestIntegration_ReplaceRootBadIssuer(t *testing.T) { + t.Parallel() + b, s := createBackendWithStorage(t) + + // generate roots + genTestRootCa(t, b, s) + genTestRootCa(t, b, s) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{ + "default": "a-bad-issuer-id", + }, + 
MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root, should have been an error in the response.") + require.NotNil(t, resp, "got nil response from replacing root") + require.True(t, resp.IsError(), "did not get an error from replacing root: %#v", resp) + + // Make sure we trap replacing with default. + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{ + "default": "default", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root, should have been an error in the response.") + require.NotNil(t, resp, "got nil response from replacing root") + require.True(t, resp.IsError(), "did not get an error from replacing root: %#v", resp) + + // Make sure we trap replacing with blank string. + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{ + "default": "", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root, should have been an error in the response.") + require.NotNil(t, resp, "got nil response from replacing root") + require.True(t, resp.IsError(), "did not get an error from replacing root: %#v", resp) +} + +func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { + t.Parallel() + rootBackend, rootStorage := createBackendWithStorage(t) + intBackend, intStorage := createBackendWithStorage(t) + + // generate root + resp, err := rootBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issuers/generate/root/internal", + Storage: rootStorage, + Data: map[string]interface{}{ + "common_name": "test.com", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed generating root ca") + require.NotNil(t, resp, "got nil response from generating root ca") + require.False(t, 
resp.IsError(), "got an error from generating root ca: %#v", resp) + rootCert := resp.Data["certificate"].(string) + + // generate intermediate + resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issuers/generate/intermediate/internal", + Storage: intStorage, + Data: map[string]interface{}{ + "common_name": "test.com", + }, + MountPoint: "pki-int/", + }) + require.NoError(t, err, "failed generating int ca") + require.NotNil(t, resp, "got nil response from generating int ca") + require.False(t, resp.IsError(), "got an error from generating int ca: %#v", resp) + intCsr := resp.Data["csr"].(string) + + // sign csr + resp, err = rootBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Storage: rootStorage, + Data: map[string]interface{}{ + "csr": intCsr, + "format": "pem_bundle", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed generating root ca") + require.NotNil(t, resp, "got nil response from generating root ca") + require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) + + intCert := resp.Data["certificate"].(string) + + // Send in the chain backwards now and make sure we link intCert as default. 
+ resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "intermediate/set-signed", + Storage: intStorage, + Data: map[string]interface{}{ + "certificate": rootCert + "\n" + intCert + "\n", + }, + MountPoint: "pki-int/", + }) + require.NoError(t, err, "failed generating root ca") + require.NotNil(t, resp, "got nil response from generating root ca") + require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) + + // setup role + resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/example", + Storage: intStorage, + Data: map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + }, + MountPoint: "pki-int/", + }) + require.NoError(t, err, "failed setting up role example") + require.Nil(t, resp, "got non-nil response from setting up role example: %#v", resp) + + // Issue cert + resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issue/example", + Storage: intStorage, + Data: map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }, + MountPoint: "pki-int/", + }) + require.NoError(t, err, "failed issuing a leaf cert from int ca") + require.NotNil(t, resp, "got nil response issuing a leaf cert from int ca") + require.False(t, resp.IsError(), "got an error issuing a leaf cert from int ca: %#v", resp) +} + +func TestIntegration_CSRGeneration(t *testing.T) { + t.Parallel() + b, s := createBackendWithStorage(t) + testCases := []struct { + keyType string + usePss bool + keyBits int + sigBits int + expectedPublicKeyType crypto.PublicKey + expectedSignature x509.SignatureAlgorithm + }{ + {"rsa", false, 2048, 0, &rsa.PublicKey{}, x509.SHA256WithRSA}, + {"rsa", false, 2048, 384, &rsa.PublicKey{}, x509.SHA384WithRSA}, + // Add back once 
https://github.com/golang/go/issues/45990 is fixed. + // {"rsa", true, 2048, 0, &rsa.PublicKey{}, x509.SHA256WithRSAPSS}, + // {"rsa", true, 2048, 512, &rsa.PublicKey{}, x509.SHA512WithRSAPSS}, + {"ec", false, 224, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA256}, + {"ec", false, 256, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA256}, + {"ec", false, 384, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA384}, + {"ec", false, 521, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA512}, + {"ec", false, 521, 224, &ecdsa.PublicKey{}, x509.ECDSAWithSHA512}, // We ignore signature_bits for ec + {"ed25519", false, 0, 0, ed25519.PublicKey{}, x509.PureEd25519}, // We ignore both fields for ed25519 + } + for _, tc := range testCases { + keyTypeName := tc.keyType + if tc.usePss { + keyTypeName = tc.keyType + "-pss" + } + testName := fmt.Sprintf("%s-%d-%d", keyTypeName, tc.keyBits, tc.sigBits) + t.Run(testName, func(t *testing.T) { + resp, err := CBWrite(b, s, "intermediate/generate/internal", map[string]interface{}{ + "common_name": "myint.com", + "key_type": tc.keyType, + "key_bits": tc.keyBits, + "signature_bits": tc.sigBits, + "use_pss": tc.usePss, + }) + requireSuccessNonNilResponse(t, resp, err) + requireFieldsSetInResp(t, resp, "csr") + + csrString := resp.Data["csr"].(string) + pemBlock, _ := pem.Decode([]byte(csrString)) + require.NotNil(t, pemBlock, "failed to parse returned csr pem block") + csr, err := x509.ParseCertificateRequest(pemBlock.Bytes) + require.NoError(t, err, "failed parsing certificate request") + + require.Equal(t, tc.expectedSignature, csr.SignatureAlgorithm, + "Expected %s, got %s", tc.expectedSignature.String(), csr.SignatureAlgorithm.String()) + require.IsType(t, tc.expectedPublicKeyType, csr.PublicKey) + }) + } +} + +func TestIntegration_AutoIssuer(t *testing.T) { + t.Parallel() + b, s := createBackendWithStorage(t) + + // Generate two roots. 
The first should become default under the existing + // behavior; when we update the config and generate a second, it should + // take over as default. Deleting the first and re-importing it will make + // it default again, and then disabling the option and removing and + // reimporting the second and creating a new root won't affect it again. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root X1", + "issuer_name": "root-1", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err) + issuerIdOne := resp.Data["issuer_id"] + require.NotEmpty(t, issuerIdOne) + certOne := resp.Data["certificate"] + require.NotEmpty(t, certOne) + + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOne, resp.Data["default"]) + + // Enable the new config option. + _, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ + "default": issuerIdOne, + "default_follows_latest_issuer": true, + }) + require.NoError(t, err) + + // Now generate the second root; it should become default. + resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root X2", + "issuer_name": "root-2", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err) + issuerIdTwo := resp.Data["issuer_id"] + require.NotEmpty(t, issuerIdTwo) + certTwo := resp.Data["certificate"] + require.NotEmpty(t, certTwo) + + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdTwo, resp.Data["default"]) + + // Deleting the first shouldn't affect the default issuer. + _, err = CBDelete(b, s, "issuer/root-1") + require.NoError(t, err) + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdTwo, resp.Data["default"]) + + // But reimporting it should update it to the new issuer's value. 
+ resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": certOne, + }) + requireSuccessNonNilResponse(t, resp, err) + issuerIdOneReimported := issuerID(resp.Data["imported_issuers"].([]string)[0]) + + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOneReimported, resp.Data["default"]) + + // Now update the config to disable this option again. + _, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ + "default": issuerIdOneReimported, + "default_follows_latest_issuer": false, + }) + require.NoError(t, err) + + // Generating a new root shouldn't update the default. + resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root X3", + "issuer_name": "root-3", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err) + issuerIdThree := resp.Data["issuer_id"] + require.NotEmpty(t, issuerIdThree) + + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOneReimported, resp.Data["default"]) + + // Deleting and re-importing root 2 should also not affect it. 
+ _, err = CBDelete(b, s, "issuer/root-2") + require.NoError(t, err) + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOneReimported, resp.Data["default"]) + + resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": certTwo, + }) + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, 1, len(resp.Data["imported_issuers"].([]string))) + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOneReimported, resp.Data["default"]) +} + +func genTestRootCa(t *testing.T, b *backend, s logical.Storage) (issuerID, keyID) { + return genTestRootCaWithIssuerName(t, b, s, "") +} + +func genTestRootCaWithIssuerName(t *testing.T, b *backend, s logical.Storage, issuerName string) (issuerID, keyID) { + data := map[string]interface{}{ + "common_name": "test.com", + } + if len(issuerName) > 0 { + data["issuer_name"] = issuerName + } + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issuers/generate/root/internal", + Storage: s, + Data: data, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed generating root ca") + require.NotNil(t, resp, "got nil response from generating root ca") + require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) + + issuerId := resp.Data["issuer_id"].(issuerID) + keyId := resp.Data["key_id"].(keyID) + + require.NotEmpty(t, issuerId, "returned issuer id was empty") + require.NotEmpty(t, keyId, "returned key id was empty") + + return issuerId, keyId +} diff --git a/builtin/logical/pki/integration_test.go b/builtin/logical/pki/integration_test.go deleted file mode 100644 index f6e9ec6482593..0000000000000 --- a/builtin/logical/pki/integration_test.go +++ /dev/null @@ -1,669 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "testing" - - "github.com/hashicorp/vault/api" - vaulthttp "github.com/hashicorp/vault/http" - vaultocsp "github.com/hashicorp/vault/sdk/helper/ocsp" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/require" -) - -func TestIntegration_RotateRootUsesNext(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/rotate/internal", - Storage: s, - Data: map[string]interface{}{ - "common_name": "test.com", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed rotate root") - require.NotNil(t, resp, "got nil response from rotate root") - require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) - - issuerId1 := resp.Data["issuer_id"].(issuerID) - issuerName1 := resp.Data["issuer_name"] - - require.NotEmpty(t, issuerId1, "issuer id was empty on initial rotate root command") - require.Equal(t, "next", issuerName1, "expected an issuer name of next on initial rotate root command") - - // Call it again, we should get a new issuer id, but since next issuer_name is used we should get a blank value. 
- resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/rotate/internal", - Storage: s, - Data: map[string]interface{}{ - "common_name": "test.com", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed rotate root") - require.NotNil(t, resp, "got nil response from rotate root") - require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) - - issuerId2 := resp.Data["issuer_id"].(issuerID) - issuerName2 := resp.Data["issuer_name"] - - require.NotEmpty(t, issuerId2, "issuer id was empty on second rotate root command") - require.NotEqual(t, issuerId1, issuerId2, "should have been different issuer ids") - require.Empty(t, issuerName2, "expected a blank issuer name on the second rotate root command") - - // Call it again, making sure we can use our own name if desired. - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/rotate/internal", - Storage: s, - Data: map[string]interface{}{ - "common_name": "test.com", - "issuer_name": "next-cert", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed rotate root") - require.NotNil(t, resp, "got nil response from rotate root") - require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) - - issuerId3 := resp.Data["issuer_id"].(issuerID) - issuerName3 := resp.Data["issuer_name"] - - require.NotEmpty(t, issuerId3, "issuer id was empty on third rotate root command") - require.NotEqual(t, issuerId3, issuerId1, "should have been different issuer id from initial") - require.NotEqual(t, issuerId3, issuerId2, "should have been different issuer id from second call") - require.Equal(t, "next-cert", issuerName3, "expected an issuer name that we specified on third rotate root command") -} - -func TestIntegration_ReplaceRootNormal(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // generate roots - genTestRootCa(t, b, s) - 
issuerId2, _ := genTestRootCa(t, b, s) - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/replace", - Storage: s, - Data: map[string]interface{}{ - "default": issuerId2.String(), - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed replacing root") - require.NotNil(t, resp, "got nil response from replacing root") - require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) - - replacedIssuer := resp.Data["default"] - require.Equal(t, issuerId2, replacedIssuer, "expected return value to match issuer we set") - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: "config/issuers", - Storage: s, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed replacing root") - require.NotNil(t, resp, "got nil response from replacing root") - require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) - - defaultIssuer := resp.Data["default"] - require.Equal(t, issuerId2, defaultIssuer, "expected default issuer to be updated") -} - -func TestIntegration_ReplaceRootDefaultsToNext(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // generate roots - genTestRootCa(t, b, s) - issuerId2, _ := genTestRootCaWithIssuerName(t, b, s, "next") - - // Do not specify the default value to replace. 
- resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/replace", - Storage: s, - Data: map[string]interface{}{}, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed replacing root") - require.NotNil(t, resp, "got nil response from replacing root") - require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) - - replacedIssuer := resp.Data["default"] - require.Equal(t, issuerId2, replacedIssuer, "expected return value to match the 'next' issuer we set") - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: "config/issuers", - Storage: s, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed replacing root") - require.NotNil(t, resp, "got nil response from replacing root") - require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) - - defaultIssuer := resp.Data["default"] - require.Equal(t, issuerId2, defaultIssuer, "expected default issuer to be updated") -} - -func TestIntegration_ReplaceRootBadIssuer(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // generate roots - genTestRootCa(t, b, s) - genTestRootCa(t, b, s) - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/replace", - Storage: s, - Data: map[string]interface{}{ - "default": "a-bad-issuer-id", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed replacing root, should have been an error in the response.") - require.NotNil(t, resp, "got nil response from replacing root") - require.True(t, resp.IsError(), "did not get an error from replacing root: %#v", resp) - - // Make sure we trap replacing with default. 
- resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/replace", - Storage: s, - Data: map[string]interface{}{ - "default": "default", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed replacing root, should have been an error in the response.") - require.NotNil(t, resp, "got nil response from replacing root") - require.True(t, resp.IsError(), "did not get an error from replacing root: %#v", resp) - - // Make sure we trap replacing with blank string. - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/replace", - Storage: s, - Data: map[string]interface{}{ - "default": "", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed replacing root, should have been an error in the response.") - require.NotNil(t, resp, "got nil response from replacing root") - require.True(t, resp.IsError(), "did not get an error from replacing root: %#v", resp) -} - -func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { - t.Parallel() - rootBackend, rootStorage := CreateBackendWithStorage(t) - intBackend, intStorage := CreateBackendWithStorage(t) - - // generate root - resp, err := rootBackend.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "issuers/generate/root/internal", - Storage: rootStorage, - Data: map[string]interface{}{ - "common_name": "test.com", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed generating root ca") - require.NotNil(t, resp, "got nil response from generating root ca") - require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) - rootCert := resp.Data["certificate"].(string) - - // generate intermediate - resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "issuers/generate/intermediate/internal", - Storage: intStorage, - 
Data: map[string]interface{}{ - "common_name": "test.com", - }, - MountPoint: "pki-int/", - }) - require.NoError(t, err, "failed generating int ca") - require.NotNil(t, resp, "got nil response from generating int ca") - require.False(t, resp.IsError(), "got an error from generating int ca: %#v", resp) - intCsr := resp.Data["csr"].(string) - - // sign csr - resp, err = rootBackend.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/sign-intermediate", - Storage: rootStorage, - Data: map[string]interface{}{ - "csr": intCsr, - "format": "pem_bundle", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed generating root ca") - require.NotNil(t, resp, "got nil response from generating root ca") - require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) - - intCert := resp.Data["certificate"].(string) - - // Send in the chain backwards now and make sure we link intCert as default. - resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "intermediate/set-signed", - Storage: intStorage, - Data: map[string]interface{}{ - "certificate": rootCert + "\n" + intCert + "\n", - }, - MountPoint: "pki-int/", - }) - require.NoError(t, err, "failed generating root ca") - require.NotNil(t, resp, "got nil response from generating root ca") - require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) - - // setup role - resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "roles/example", - Storage: intStorage, - Data: map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "max_ttl": "1h", - }, - MountPoint: "pki-int/", - }) - require.NoError(t, err, "failed setting up role example") - require.NotNil(t, resp, "got nil response from setting up role example: %#v", resp) - - // Issue cert - resp, err = 
intBackend.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "issue/example", - Storage: intStorage, - Data: map[string]interface{}{ - "common_name": "test.example.com", - "ttl": "5m", - }, - MountPoint: "pki-int/", - }) - require.NoError(t, err, "failed issuing a leaf cert from int ca") - require.NotNil(t, resp, "got nil response issuing a leaf cert from int ca") - require.False(t, resp.IsError(), "got an error issuing a leaf cert from int ca: %#v", resp) -} - -func TestIntegration_CSRGeneration(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - testCases := []struct { - keyType string - usePss bool - keyBits int - sigBits int - expectedPublicKeyType crypto.PublicKey - expectedSignature x509.SignatureAlgorithm - }{ - {"rsa", false, 2048, 0, &rsa.PublicKey{}, x509.SHA256WithRSA}, - {"rsa", false, 2048, 384, &rsa.PublicKey{}, x509.SHA384WithRSA}, - // Add back once https://github.com/golang/go/issues/45990 is fixed. - // {"rsa", true, 2048, 0, &rsa.PublicKey{}, x509.SHA256WithRSAPSS}, - // {"rsa", true, 2048, 512, &rsa.PublicKey{}, x509.SHA512WithRSAPSS}, - {"ec", false, 224, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA256}, - {"ec", false, 256, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA256}, - {"ec", false, 384, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA384}, - {"ec", false, 521, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA512}, - {"ec", false, 521, 224, &ecdsa.PublicKey{}, x509.ECDSAWithSHA512}, // We ignore signature_bits for ec - {"ed25519", false, 0, 0, ed25519.PublicKey{}, x509.PureEd25519}, // We ignore both fields for ed25519 - } - for _, tc := range testCases { - keyTypeName := tc.keyType - if tc.usePss { - keyTypeName = tc.keyType + "-pss" - } - testName := fmt.Sprintf("%s-%d-%d", keyTypeName, tc.keyBits, tc.sigBits) - t.Run(testName, func(t *testing.T) { - resp, err := CBWrite(b, s, "intermediate/generate/internal", map[string]interface{}{ - "common_name": "myint.com", - "key_type": tc.keyType, - 
"key_bits": tc.keyBits, - "signature_bits": tc.sigBits, - "use_pss": tc.usePss, - }) - requireSuccessNonNilResponse(t, resp, err) - requireFieldsSetInResp(t, resp, "csr") - - csrString := resp.Data["csr"].(string) - pemBlock, _ := pem.Decode([]byte(csrString)) - require.NotNil(t, pemBlock, "failed to parse returned csr pem block") - csr, err := x509.ParseCertificateRequest(pemBlock.Bytes) - require.NoError(t, err, "failed parsing certificate request") - - require.Equal(t, tc.expectedSignature, csr.SignatureAlgorithm, - "Expected %s, got %s", tc.expectedSignature.String(), csr.SignatureAlgorithm.String()) - require.IsType(t, tc.expectedPublicKeyType, csr.PublicKey) - }) - } -} - -func TestIntegration_AutoIssuer(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Generate two roots. The first should become default under the existing - // behavior; when we update the config and generate a second, it should - // take over as default. Deleting the first and re-importing it will make - // it default again, and then disabling the option and removing and - // reimporting the second and creating a new root won't affect it again. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "Root X1", - "issuer_name": "root-1", - "key_type": "ec", - }) - - requireSuccessNonNilResponse(t, resp, err) - issuerIdOne := resp.Data["issuer_id"] - require.NotEmpty(t, issuerIdOne) - certOne := resp.Data["certificate"] - require.NotEmpty(t, certOne) - - resp, err = CBRead(b, s, "config/issuers") - requireSuccessNonNilResponse(t, resp, err) - require.Equal(t, issuerIdOne, resp.Data["default"]) - - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/issuers"), logical.ReadOperation), resp, true) - - // Enable the new config option. 
- resp, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ - "default": issuerIdOne, - "default_follows_latest_issuer": true, - }) - require.NoError(t, err) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/issuers"), logical.UpdateOperation), resp, true) - - // Now generate the second root; it should become default. - resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "Root X2", - "issuer_name": "root-2", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err) - issuerIdTwo := resp.Data["issuer_id"] - require.NotEmpty(t, issuerIdTwo) - certTwo := resp.Data["certificate"] - require.NotEmpty(t, certTwo) - - resp, err = CBRead(b, s, "config/issuers") - requireSuccessNonNilResponse(t, resp, err) - require.Equal(t, issuerIdTwo, resp.Data["default"]) - - // Deleting the first shouldn't affect the default issuer. - _, err = CBDelete(b, s, "issuer/root-1") - require.NoError(t, err) - resp, err = CBRead(b, s, "config/issuers") - requireSuccessNonNilResponse(t, resp, err) - require.Equal(t, issuerIdTwo, resp.Data["default"]) - - // But reimporting it should update it to the new issuer's value. - resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": certOne, - }) - requireSuccessNonNilResponse(t, resp, err) - issuerIdOneReimported := issuerID(resp.Data["imported_issuers"].([]string)[0]) - - resp, err = CBRead(b, s, "config/issuers") - requireSuccessNonNilResponse(t, resp, err) - require.Equal(t, issuerIdOneReimported, resp.Data["default"]) - - // Now update the config to disable this option again. - _, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ - "default": issuerIdOneReimported, - "default_follows_latest_issuer": false, - }) - require.NoError(t, err) - - // Generating a new root shouldn't update the default. 
- resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "Root X3", - "issuer_name": "root-3", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err) - issuerIdThree := resp.Data["issuer_id"] - require.NotEmpty(t, issuerIdThree) - - resp, err = CBRead(b, s, "config/issuers") - requireSuccessNonNilResponse(t, resp, err) - require.Equal(t, issuerIdOneReimported, resp.Data["default"]) - - // Deleting and re-importing root 2 should also not affect it. - _, err = CBDelete(b, s, "issuer/root-2") - require.NoError(t, err) - resp, err = CBRead(b, s, "config/issuers") - requireSuccessNonNilResponse(t, resp, err) - require.Equal(t, issuerIdOneReimported, resp.Data["default"]) - - resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": certTwo, - }) - requireSuccessNonNilResponse(t, resp, err) - require.Equal(t, 1, len(resp.Data["imported_issuers"].([]string))) - resp, err = CBRead(b, s, "config/issuers") - requireSuccessNonNilResponse(t, resp, err) - require.Equal(t, issuerIdOneReimported, resp.Data["default"]) -} - -func TestIntegrationOCSPClientWithPKI(t *testing.T) { - t.Parallel() - - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - - cluster.Start() - defer cluster.Cleanup() - cores := cluster.Cores - vault.TestWaitActive(t, cores[0].Core) - client := cores[0].Client - - err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "32h", - }, - }) - require.NoError(t, err) - - resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "Root R1", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, 
resp.Data["issuer_id"]) - rootIssuerId := resp.Data["issuer_id"].(string) - - // Set URLs pointing to the issuer. - _, err = client.Logical().Write("pki/config/cluster", map[string]interface{}{ - "path": client.Address() + "/v1/pki", - "aia_path": client.Address() + "/v1/pki", - }) - require.NoError(t, err) - - _, err = client.Logical().Write("pki/config/urls", map[string]interface{}{ - "enable_templating": true, - "crl_distribution_points": "{{cluster_aia_path}}/issuer/{{issuer_id}}/crl/der", - "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", - "ocsp_servers": "{{cluster_aia_path}}/ocsp", - }) - require.NoError(t, err) - - // Build an intermediate CA - resp, err = client.Logical().Write("pki/intermediate/generate/internal", map[string]interface{}{ - "common_name": "Int X1", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["csr"]) - intermediateCSR := resp.Data["csr"].(string) - - resp, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ - "csr": intermediateCSR, - "ttl": "20h", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - intermediateCert := resp.Data["certificate"] - - resp, err = client.Logical().Write("pki/intermediate/set-signed", map[string]interface{}{ - "certificate": intermediateCert, - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["imported_issuers"]) - rawImportedIssuers := resp.Data["imported_issuers"].([]interface{}) - require.Equal(t, len(rawImportedIssuers), 1) - importedIssuer := rawImportedIssuers[0].(string) - require.NotEmpty(t, importedIssuer) - - // Set intermediate as default. 
- _, err = client.Logical().Write("pki/config/issuers", map[string]interface{}{ - "default": importedIssuer, - }) - require.NoError(t, err) - - // Setup roles for root, intermediate. - _, err = client.Logical().Write("pki/roles/example-root", map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "max_ttl": "1h", - "key_type": "ec", - "issuer_ref": rootIssuerId, - }) - require.NoError(t, err) - - _, err = client.Logical().Write("pki/roles/example-int", map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "max_ttl": "1h", - "key_type": "ec", - }) - require.NoError(t, err) - - // Issue certs and validate them against OCSP. - for _, path := range []string{"pki/issue/example-int", "pki/issue/example-root"} { - t.Logf("Validating against path: %v", path) - resp, err = client.Logical().Write(path, map[string]interface{}{ - "common_name": "test.example.com", - "ttl": "5m", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - require.NotEmpty(t, resp.Data["issuing_ca"]) - require.NotEmpty(t, resp.Data["serial_number"]) - - certPEM := resp.Data["certificate"].(string) - certBlock, _ := pem.Decode([]byte(certPEM)) - require.NotNil(t, certBlock) - cert, err := x509.ParseCertificate(certBlock.Bytes) - require.NoError(t, err) - require.NotNil(t, cert) - - issuerPEM := resp.Data["issuing_ca"].(string) - issuerBlock, _ := pem.Decode([]byte(issuerPEM)) - require.NotNil(t, issuerBlock) - issuer, err := x509.ParseCertificate(issuerBlock.Bytes) - require.NoError(t, err) - require.NotNil(t, issuer) - - serialNumber := resp.Data["serial_number"].(string) - - testLogger := hclog.New(hclog.DefaultOptions) - - conf := &vaultocsp.VerifyConfig{ - OcspFailureMode: vaultocsp.FailOpenFalse, - ExtraCas: []*x509.Certificate{cluster.CACert}, - } - ocspClient := vaultocsp.New(func() hclog.Logger { - return testLogger - }, 10) - - err = 
ocspClient.VerifyLeafCertificate(context.Background(), cert, issuer, conf) - require.NoError(t, err) - - _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ - "serial_number": serialNumber, - }) - require.NoError(t, err) - - err = ocspClient.VerifyLeafCertificate(context.Background(), cert, issuer, conf) - require.Error(t, err) - } -} - -func genTestRootCa(t *testing.T, b *backend, s logical.Storage) (issuerID, keyID) { - return genTestRootCaWithIssuerName(t, b, s, "") -} - -func genTestRootCaWithIssuerName(t *testing.T, b *backend, s logical.Storage, issuerName string) (issuerID, keyID) { - data := map[string]interface{}{ - "common_name": "test.com", - } - if len(issuerName) > 0 { - data["issuer_name"] = issuerName - } - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "issuers/generate/root/internal", - Storage: s, - Data: data, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed generating root ca") - require.NotNil(t, resp, "got nil response from generating root ca") - require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) - - issuerId := resp.Data["issuer_id"].(issuerID) - keyId := resp.Data["key_id"].(keyID) - - require.NotEmpty(t, issuerId, "returned issuer id was empty") - require.NotEmpty(t, keyId, "returned key id was empty") - - return issuerId, keyId -} diff --git a/builtin/logical/pki/key_util.go b/builtin/logical/pki/key_util.go index 5f2d19c65dad4..c40831714625e 100644 --- a/builtin/logical/pki/key_util.go +++ b/builtin/logical/pki/key_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( diff --git a/builtin/logical/pki/managed_key_util.go b/builtin/logical/pki/managed_key_util.go index 42e031deceb31..29ab43381329e 100644 --- a/builtin/logical/pki/managed_key_util.go +++ b/builtin/logical/pki/managed_key_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - //go:build !enterprise package pki diff --git a/builtin/logical/pki/path_acme_account.go b/builtin/logical/pki/path_acme_account.go deleted file mode 100644 index 36801df151bff..0000000000000 --- a/builtin/logical/pki/path_acme_account.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "fmt" - "net/http" - "path" - "strings" - "time" - - "github.com/hashicorp/go-secure-stdlib/strutil" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func uuidNameRegex(name string) string { - return fmt.Sprintf("(?P<%s>[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}?)", name) -} - -func pathAcmeNewAccount(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeNewAccount, "/new-account") -} - -func pathAcmeUpdateAccount(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeNewAccount, "/account/"+uuidNameRegex("kid")) -} - -func addFieldsForACMEPath(fields map[string]*framework.FieldSchema, pattern string) map[string]*framework.FieldSchema { - if strings.Contains(pattern, framework.GenericNameRegex("role")) { - fields["role"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The desired role for the acme request`, - Required: true, - } - } - if strings.Contains(pattern, framework.GenericNameRegex(issuerRefParam)) { - fields[issuerRefParam] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Reference to an existing issuer name or issuer id`, - Required: true, - } - } - - return fields -} - -func addFieldsForACMERequest(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["protected"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: "ACME request 'protected' value", - Required: false, - } - - fields["payload"] = &framework.FieldSchema{ - Type: 
framework.TypeString, - Description: "ACME request 'payload' value", - Required: false, - } - - fields["signature"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: "ACME request 'signature' value", - Required: false, - } - - return fields -} - -func addFieldsForACMEKidRequest(fields map[string]*framework.FieldSchema, pattern string) map[string]*framework.FieldSchema { - if strings.Contains(pattern, uuidNameRegex("kid")) { - fields["kid"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The key identifier provided by the CA`, - Required: true, - } - } - - return fields -} - -func patternAcmeNewAccount(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - addFieldsForACMERequest(fields) - addFieldsForACMEKidRequest(fields, pattern) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.acmeParsedWrapper(b.acmeNewAccountHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func (b *backend) acmeNewAccountHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) { - // Parameters - var ok bool - var onlyReturnExisting bool - var contacts []string - var termsOfServiceAgreed bool - var status string - var eabData map[string]interface{} - - rawContact, present := data["contact"] - if present { - listContact, ok := rawContact.([]interface{}) - if !ok { - return nil, fmt.Errorf("invalid type (%T) for field 'contact': %w", rawContact, ErrMalformed) - } - - for index, singleContact := range listContact { - contact, ok := singleContact.(string) - if !ok { - return nil, fmt.Errorf("invalid type (%T) 
for field 'contact' item %d: %w", singleContact, index, ErrMalformed) - } - - contacts = append(contacts, contact) - } - } - - rawTermsOfServiceAgreed, present := data["termsOfServiceAgreed"] - if present { - termsOfServiceAgreed, ok = rawTermsOfServiceAgreed.(bool) - if !ok { - return nil, fmt.Errorf("invalid type (%T) for field 'termsOfServiceAgreed': %w", rawTermsOfServiceAgreed, ErrMalformed) - } - } - - rawOnlyReturnExisting, present := data["onlyReturnExisting"] - if present { - onlyReturnExisting, ok = rawOnlyReturnExisting.(bool) - if !ok { - return nil, fmt.Errorf("invalid type (%T) for field 'onlyReturnExisting': %w", rawOnlyReturnExisting, ErrMalformed) - } - } - - // Per RFC 8555 7.3.6 Account deactivation, we will handle it within our update API. - rawStatus, present := data["status"] - if present { - status, ok = rawStatus.(string) - if !ok { - return nil, fmt.Errorf("invalid type (%T) for field 'onlyReturnExisting': %w", rawOnlyReturnExisting, ErrMalformed) - } - } - - if eabDataRaw, ok := data["externalAccountBinding"]; ok { - eabData, ok = eabDataRaw.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("%w: externalAccountBinding field was unparseable", ErrMalformed) - } - } - - // We have two paths here: search or create. - if onlyReturnExisting { - return b.acmeAccountSearchHandler(acmeCtx, userCtx) - } - - // Pass through the /new-account API calls to this specific handler as its requirements are different - // from the account update handler. - if strings.HasSuffix(r.Path, "/new-account") { - return b.acmeNewAccountCreateHandler(acmeCtx, userCtx, contacts, termsOfServiceAgreed, r, eabData) - } - - return b.acmeNewAccountUpdateHandler(acmeCtx, userCtx, contacts, status, eabData) -} - -func formatNewAccountResponse(acmeCtx *acmeContext, acct *acmeAccount, eabData map[string]interface{}) *logical.Response { - resp := formatAccountResponse(acmeCtx, acct) - - // Per RFC 8555 Section 7.1.2. 
Account Objects - // Including this field in a newAccount request indicates approval by - // the holder of an existing non-ACME account to bind that account to - // this ACME account - if acct.Eab != nil && len(eabData) != 0 { - resp.Data["externalAccountBinding"] = eabData - } - - return resp -} - -func formatAccountResponse(acmeCtx *acmeContext, acct *acmeAccount) *logical.Response { - location := acmeCtx.baseUrl.String() + "account/" + acct.KeyId - - resp := &logical.Response{ - Data: map[string]interface{}{ - "status": acct.Status, - "orders": location + "/orders", - }, - Headers: map[string][]string{ - "Location": {location}, - }, - } - - if len(acct.Contact) > 0 { - resp.Data["contact"] = acct.Contact - } - - return resp -} - -func (b *backend) acmeAccountSearchHandler(acmeCtx *acmeContext, userCtx *jwsCtx) (*logical.Response, error) { - thumbprint, err := userCtx.GetKeyThumbprint() - if err != nil { - return nil, fmt.Errorf("failed generating thumbprint for key: %w", err) - } - - account, err := b.acmeState.LoadAccountByKey(acmeCtx, thumbprint) - if err != nil { - return nil, fmt.Errorf("failed to load account by thumbprint: %w", err) - } - - if account != nil { - if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil { - return nil, err - } - return formatAccountResponse(acmeCtx, account), nil - } - - // Per RFC 8555 Section 7.3.1. Finding an Account URL Given a Key: - // - // > If a client sends such a request and an account does not exist, - // > then the server MUST return an error response with status code - // > 400 (Bad Request) and type "urn:ietf:params:acme:error:accountDoesNotExist". 
- return nil, fmt.Errorf("An account with this key does not exist: %w", ErrAccountDoesNotExist) -} - -func (b *backend) acmeNewAccountCreateHandler(acmeCtx *acmeContext, userCtx *jwsCtx, contact []string, termsOfServiceAgreed bool, r *logical.Request, eabData map[string]interface{}) (*logical.Response, error) { - if userCtx.Existing { - return nil, fmt.Errorf("cannot submit to newAccount with 'kid': %w", ErrMalformed) - } - - // If the account already exists, return the existing one. - thumbprint, err := userCtx.GetKeyThumbprint() - if err != nil { - return nil, fmt.Errorf("failed generating thumbprint for key: %w", err) - } - - accountByKey, err := b.acmeState.LoadAccountByKey(acmeCtx, thumbprint) - if err != nil { - return nil, fmt.Errorf("failed to load account by thumbprint: %w", err) - } - - if accountByKey != nil { - if err = acmeCtx.eabPolicy.EnforceForExistingAccount(accountByKey); err != nil { - return nil, err - } - return formatAccountResponse(acmeCtx, accountByKey), nil - } - - var eab *eabType - if len(eabData) != 0 { - eab, err = verifyEabPayload(b.acmeState, acmeCtx, userCtx, r.Path, eabData) - if err != nil { - return nil, err - } - } - - // Verify against our EAB policy - if err = acmeCtx.eabPolicy.EnforceForNewAccount(eab); err != nil { - return nil, err - } - - // TODO: Limit this only when ToS are required or set by the operator, since we don't have a - // ToS URL in the directory at the moment, we can not enforce this. 
- //if !termsOfServiceAgreed { - // return nil, fmt.Errorf("terms of service not agreed to: %w", ErrUserActionRequired) - //} - - if eab != nil { - // We delete the EAB to prevent future re-use after associating it with an account, worst - // case if we fail creating the account we simply nuked the EAB which they can create another - // and retry - wasDeleted, err := b.acmeState.DeleteEab(acmeCtx.sc, eab.KeyID) - if err != nil { - return nil, fmt.Errorf("failed to delete eab reference: %w", err) - } - - if !wasDeleted { - // Something consumed our EAB before we did bail... - return nil, fmt.Errorf("eab was already used: %w", ErrUnauthorized) - } - } - - b.acmeAccountLock.RLock() // Prevents Account Creation and Tidy Interfering - defer b.acmeAccountLock.RUnlock() - - accountByKid, err := b.acmeState.CreateAccount(acmeCtx, userCtx, contact, termsOfServiceAgreed, eab) - if err != nil { - if eab != nil { - return nil, fmt.Errorf("failed to create account: %w; the EAB key used for this request has been deleted as a result of this operation; fetch a new EAB key before retrying", err) - } - return nil, fmt.Errorf("failed to create account: %w", err) - } - - resp := formatNewAccountResponse(acmeCtx, accountByKid, eabData) - - // Per RFC 8555 Section 7.3. Account Management: - // - // > The server returns this account object in a 201 (Created) response, - // > with the account URL in a Location header field. 
- resp.Data[logical.HTTPStatusCode] = http.StatusCreated - return resp, nil -} - -func (b *backend) acmeNewAccountUpdateHandler(acmeCtx *acmeContext, userCtx *jwsCtx, contact []string, status string, eabData map[string]interface{}) (*logical.Response, error) { - if !userCtx.Existing { - return nil, fmt.Errorf("cannot submit to account updates without a 'kid': %w", ErrMalformed) - } - - if len(eabData) != 0 { - return nil, fmt.Errorf("%w: not allowed to update EAB data in accounts", ErrMalformed) - } - - account, err := b.acmeState.LoadAccount(acmeCtx, userCtx.Kid) - if err != nil { - return nil, fmt.Errorf("error loading account: %w", err) - } - - if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil { - return nil, err - } - - // Per RFC 8555 7.3.6 Account deactivation, if we were previously deactivated, we should return - // unauthorized. There is no way to reactivate any accounts per ACME RFC. - if account.Status != AccountStatusValid { - // Treating "revoked" and "deactivated" as the same here. - return nil, ErrUnauthorized - } - - shouldUpdate := false - // Check to see if we should update, we don't really care about ordering - if !strutil.EquivalentSlices(account.Contact, contact) { - shouldUpdate = true - account.Contact = contact - } - - // Check to process account de-activation status was requested. - // 7.3.6. Account Deactivation - if string(AccountStatusDeactivated) == status { - shouldUpdate = true - // TODO: This should cancel any ongoing operations (do not revoke certs), - // perhaps we should delete this account here? 
- account.Status = AccountStatusDeactivated - account.AccountRevokedDate = time.Now() - } - - if shouldUpdate { - err = b.acmeState.UpdateAccount(acmeCtx, account) - if err != nil { - return nil, fmt.Errorf("failed to update account: %w", err) - } - } - - resp := formatAccountResponse(acmeCtx, account) - return resp, nil -} - -func (b *backend) tidyAcmeAccountByThumbprint(as *acmeState, ac *acmeContext, keyThumbprint string, certTidyBuffer, accountTidyBuffer time.Duration) error { - thumbprintEntry, err := ac.sc.Storage.Get(ac.sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) - if err != nil { - return fmt.Errorf("error retrieving thumbprint entry %v, unable to find corresponding account entry: %w", keyThumbprint, err) - } - if thumbprintEntry == nil { - return fmt.Errorf("empty thumbprint entry %v, unable to find corresponding account entry", keyThumbprint) - } - - var thumbprint acmeThumbprint - err = thumbprintEntry.DecodeJSON(&thumbprint) - if err != nil { - return fmt.Errorf("unable to decode thumbprint entry %v to find account entry: %w", keyThumbprint, err) - } - - if len(thumbprint.Kid) == 0 { - return fmt.Errorf("unable to find account entry: empty kid within thumbprint entry: %s", keyThumbprint) - } - - // Now Get the Account: - accountEntry, err := ac.sc.Storage.Get(ac.sc.Context, acmeAccountPrefix+thumbprint.Kid) - if err != nil { - return err - } - if accountEntry == nil { - // We delete the Thumbprint Associated with the Account, and we are done - err = ac.sc.Storage.Delete(ac.sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) - if err != nil { - return err - } - b.tidyStatusIncDeletedAcmeAccountCount() - return nil - } - - var account acmeAccount - err = accountEntry.DecodeJSON(&account) - if err != nil { - return err - } - - // Tidy Orders On the Account - orderIds, err := as.ListOrderIds(ac, thumbprint.Kid) - if err != nil { - return err - } - allOrdersTidied := true - maxCertExpiryUpdated := false - for _, orderId := range 
orderIds { - wasTidied, orderExpiry, err := b.acmeTidyOrder(ac, thumbprint.Kid, getOrderPath(thumbprint.Kid, orderId), certTidyBuffer) - if err != nil { - return err - } - if !wasTidied { - allOrdersTidied = false - } - - if !orderExpiry.IsZero() && account.MaxCertExpiry.Before(orderExpiry) { - account.MaxCertExpiry = orderExpiry - maxCertExpiryUpdated = true - } - } - - now := time.Now() - if allOrdersTidied && - now.After(account.AccountCreatedDate.Add(accountTidyBuffer)) && - now.After(account.MaxCertExpiry.Add(accountTidyBuffer)) { - // Tidy this account - // If it is Revoked or Deactivated: - if (account.Status == AccountStatusRevoked || account.Status == AccountStatusDeactivated) && now.After(account.AccountRevokedDate.Add(accountTidyBuffer)) { - // We Delete the Account Associated with this Thumbprint: - err = ac.sc.Storage.Delete(ac.sc.Context, path.Join(acmeAccountPrefix, thumbprint.Kid)) - if err != nil { - return err - } - - // Now we delete the Thumbprint Associated with the Account: - err = ac.sc.Storage.Delete(ac.sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) - if err != nil { - return err - } - b.tidyStatusIncDeletedAcmeAccountCount() - } else if account.Status == AccountStatusValid { - // Revoke This Account - account.AccountRevokedDate = now - account.Status = AccountStatusRevoked - err := as.UpdateAccount(ac, &account) - if err != nil { - return err - } - b.tidyStatusIncRevAcmeAccountCount() - } - } - - // Only update the account if we modified the max cert expiry values and the account is still valid, - // to prevent us from adding back a deleted account or not re-writing the revoked account that was - // already written above. - if maxCertExpiryUpdated && account.Status == AccountStatusValid { - // Update our expiry time we previously setup. 
- err := as.UpdateAccount(ac, &account) - if err != nil { - return err - } - } - - return nil -} diff --git a/builtin/logical/pki/path_acme_authorizations.go b/builtin/logical/pki/path_acme_authorizations.go deleted file mode 100644 index 9914491f619ee..0000000000000 --- a/builtin/logical/pki/path_acme_authorizations.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathAcmeAuthorization(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeAuthorization, "/authorization/"+framework.MatchAllRegex("auth_id")) -} - -func addFieldsForACMEAuthorization(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["auth_id"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: "ACME authorization identifier value", - Required: true, - } - - return fields -} - -func patternAcmeAuthorization(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - addFieldsForACMERequest(fields) - addFieldsForACMEAuthorization(fields) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.acmeAccountRequiredWrapper(b.acmeAuthorizationHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func (b *backend) acmeAuthorizationHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { - authId := fields.Get("auth_id").(string) - authz, err := b.acmeState.LoadAuthorization(acmeCtx, userCtx, 
authId) - if err != nil { - return nil, fmt.Errorf("failed to load authorization: %w", err) - } - - var status string - rawStatus, haveStatus := data["status"] - if haveStatus { - var ok bool - status, ok = rawStatus.(string) - if !ok { - return nil, fmt.Errorf("bad type (%T) for value 'status': %w", rawStatus, ErrMalformed) - } - } - - if len(data) == 0 { - return b.acmeAuthorizationFetchHandler(acmeCtx, r, fields, userCtx, data, authz) - } - - if haveStatus && status == "deactivated" { - return b.acmeAuthorizationDeactivateHandler(acmeCtx, r, fields, userCtx, data, authz) - } - - return nil, ErrMalformed -} - -func (b *backend) acmeAuthorizationFetchHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { - return &logical.Response{ - Data: authz.NetworkMarshal(acmeCtx), - }, nil -} - -func (b *backend) acmeAuthorizationDeactivateHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { - if authz.Status != ACMEAuthorizationPending && authz.Status != ACMEAuthorizationValid { - return nil, fmt.Errorf("unable to deactivate authorization in '%v' status: %w", authz.Status, ErrMalformed) - } - - authz.Status = ACMEAuthorizationDeactivated - for _, challenge := range authz.Challenges { - challenge.Status = ACMEChallengeInvalid - } - - if err := b.acmeState.SaveAuthorization(acmeCtx, authz); err != nil { - return nil, fmt.Errorf("error saving deactivated authorization: %w", err) - } - - return &logical.Response{ - Data: authz.NetworkMarshal(acmeCtx), - }, nil -} diff --git a/builtin/logical/pki/path_acme_challenges.go b/builtin/logical/pki/path_acme_challenges.go deleted file mode 100644 index cded9d32a3c9c..0000000000000 --- a/builtin/logical/pki/path_acme_challenges.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 
HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathAcmeChallenge(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeChallenge, - "/challenge/"+framework.MatchAllRegex("auth_id")+"/"+framework.MatchAllRegex("challenge_type")) -} - -func addFieldsForACMEChallenge(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["auth_id"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: "ACME authorization identifier value", - Required: true, - } - - fields["challenge_type"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: "ACME challenge type", - Required: true, - } - - return fields -} - -func patternAcmeChallenge(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - addFieldsForACMERequest(fields) - addFieldsForACMEChallenge(fields) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.acmeAccountRequiredWrapper(b.acmeChallengeHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func (b *backend) acmeChallengeHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { - authId := fields.Get("auth_id").(string) - challengeType := fields.Get("challenge_type").(string) - - authz, err := b.acmeState.LoadAuthorization(acmeCtx, userCtx, authId) - if err != nil { - return nil, fmt.Errorf("failed to load authorization: %w", err) - } - - return b.acmeChallengeFetchHandler(acmeCtx, r, fields, 
userCtx, data, authz, challengeType) -} - -func (b *backend) acmeChallengeFetchHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization, challengeType string) (*logical.Response, error) { - var challenge *ACMEChallenge - for _, c := range authz.Challenges { - if string(c.Type) == challengeType { - challenge = c - break - } - } - - if challenge == nil { - return nil, fmt.Errorf("unknown challenge of type '%v' in authorization: %w", challengeType, ErrMalformed) - } - - // Per RFC 8555 Section 7.5.1. Responding to Challenges: - // - // > The client indicates to the server that it is ready for the challenge - // > validation by sending an empty JSON body ("{}") carried in a POST - // > request to the challenge URL (not the authorization URL). - if len(data) > 0 { - return nil, fmt.Errorf("unexpected request parameters: %w", ErrMalformed) - } - - // If data was nil, we got a POST-as-GET request, just return current challenge without an accept, - // otherwise we most likely got a "{}" payload which we should now accept the challenge. - if data != nil { - thumbprint, err := userCtx.GetKeyThumbprint() - if err != nil { - return nil, fmt.Errorf("failed to get thumbprint for key: %w", err) - } - - if err := b.acmeState.validator.AcceptChallenge(acmeCtx.sc, userCtx.Kid, authz, challenge, thumbprint); err != nil { - return nil, fmt.Errorf("error submitting challenge for validation: %w", err) - } - } - - return &logical.Response{ - Data: challenge.NetworkMarshal(acmeCtx, authz.Id), - - // Per RFC 8555 Section 7.1. Resources: - // - // > The "up" link relation is used with challenge resources to indicate - // > the authorization resource to which a challenge belongs. 
- Headers: map[string][]string{ - "Link": {fmt.Sprintf("<%s>;rel=\"up\"", buildAuthorizationUrl(acmeCtx, authz.Id))}, - }, - }, nil -} diff --git a/builtin/logical/pki/path_acme_directory.go b/builtin/logical/pki/path_acme_directory.go deleted file mode 100644 index e556b35db5bd9..0000000000000 --- a/builtin/logical/pki/path_acme_directory.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - pathAcmeHelpSync = `An endpoint implementing the standard ACME protocol` - pathAcmeHelpDesc = `This API endpoint implementing a subset of the ACME protocol - defined in RFC 8555, with its own authentication and argument syntax that - does not follow conventional Vault operations. An ACME client tool or library - should be used to interact with these endpoints.` -) - -func pathAcmeDirectory(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeDirectory, "/directory") -} - -func patternAcmeDirectory(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.acmeWrapper(b.acmeDirectoryHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func (b *backend) acmeDirectoryHandler(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - rawBody, err := json.Marshal(map[string]interface{}{ - "newNonce": acmeCtx.baseUrl.JoinPath("new-nonce").String(), - "newAccount": acmeCtx.baseUrl.JoinPath("new-account").String(), - 
"newOrder": acmeCtx.baseUrl.JoinPath("new-order").String(), - "revokeCert": acmeCtx.baseUrl.JoinPath("revoke-cert").String(), - "keyChange": acmeCtx.baseUrl.JoinPath("key-change").String(), - // This is purposefully missing newAuthz as we don't support pre-authorization - "meta": map[string]interface{}{ - "externalAccountRequired": acmeCtx.eabPolicy.IsExternalAccountRequired(), - }, - }) - if err != nil { - return nil, fmt.Errorf("failed encoding response: %w", err) - } - - return &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: "application/json", - logical.HTTPStatusCode: http.StatusOK, - logical.HTTPRawBody: rawBody, - }, - }, nil -} diff --git a/builtin/logical/pki/path_acme_eab.go b/builtin/logical/pki/path_acme_eab.go deleted file mode 100644 index d575d6c453d1f..0000000000000 --- a/builtin/logical/pki/path_acme_eab.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "crypto/rand" - "encoding/base64" - "fmt" - "path" - "time" - - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -var decodedTokenPrefix = mustBase64Decode("vault-eab-0-") - -func mustBase64Decode(s string) []byte { - bytes, err := base64.RawURLEncoding.DecodeString(s) - if err != nil { - panic(fmt.Sprintf("Token prefix value: %s failed decoding: %v", s, err)) - } - - // Should be dividable by 3 otherwise our prefix will not be properly honored. - if len(bytes)%3 != 0 { - panic(fmt.Sprintf("Token prefix value: %s is not dividable by 3, will not prefix properly", s)) - } - return bytes -} - -/* - * This file unlike the other path_acme_xxx.go are VAULT APIs to manage the - * ACME External Account Bindings, this isn't providing any APIs that an ACME - * client would use. 
- */ -func pathAcmeEabList(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "eab/?$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, - - Fields: map[string]*framework.FieldSchema{}, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ListOperation: &framework.PathOperation{ - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "list-eab-key", - OperationSuffix: "acme", - }, - Callback: b.pathAcmeListEab, - }, - }, - - HelpSynopsis: "list external account bindings to be used for ACME", - HelpDescription: `list identifiers that have been generated but yet to be used.`, - } -} - -func pathAcmeNewEab(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeNewEab, "/new-eab") -} - -func patternAcmeNewEab(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathAcmeCreateEab, - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "generate-eab-key", - OperationSuffix: "acme", - }, - }, - }, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, - - HelpSynopsis: "Generate external account bindings to be used for ACME", - HelpDescription: `Generate single use id/key pairs to be used for ACME EAB.`, - } -} - -func pathAcmeEabDelete(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "eab/" + uuidNameRegex("key_id"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, - - Fields: map[string]*framework.FieldSchema{ - "key_id": { - Type: framework.TypeString, - Description: "EAB key identifier", - Required: true, - 
}, - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.DeleteOperation: &framework.PathOperation{ - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "acme-configuration", - }, - Callback: b.pathAcmeDeleteEab, - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: "Delete an external account binding id prior to its use within an ACME account", - HelpDescription: `Allows an operator to delete an external account binding, -before its bound to a new ACME account. If the identifier provided does not exist or -was already consumed by an ACME account a successful response is returned along with -a warning that it did not exist.`, - } -} - -type eabType struct { - KeyID string `json:"-"` - KeyType string `json:"key-type"` - PrivateBytes []byte `json:"private-bytes"` - AcmeDirectory string `json:"acme-directory"` - CreatedOn time.Time `json:"created-on"` -} - -func (b *backend) pathAcmeListEab(ctx context.Context, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, r.Storage) - - eabIds, err := b.acmeState.ListEabIds(sc) - if err != nil { - return nil, err - } - - var warnings []string - var keyIds []string - keyInfos := map[string]interface{}{} - - for _, eabKey := range eabIds { - eab, err := b.acmeState.LoadEab(sc, eabKey) - if err != nil { - warnings = append(warnings, fmt.Sprintf("failed loading eab entry %s: %v", eabKey, err)) - continue - } - - keyIds = append(keyIds, eab.KeyID) - keyInfos[eab.KeyID] = map[string]interface{}{ - "key_type": eab.KeyType, - "acme_directory": path.Join(eab.AcmeDirectory, "directory"), - "created_on": eab.CreatedOn.Format(time.RFC3339), - } - } - - resp := logical.ListResponseWithInfo(keyIds, keyInfos) - for _, warning := range warnings { - resp.AddWarning(warning) - } - return resp, nil -} - -func (b *backend) pathAcmeCreateEab(ctx context.Context, r *logical.Request, data *framework.FieldData) 
(*logical.Response, error) { - kid := genUuid() - size := 32 - bytes, err := uuid.GenerateRandomBytesWithReader(size, rand.Reader) - if err != nil { - return nil, fmt.Errorf("failed generating eab key: %w", err) - } - - acmeDirectory, err := getAcmeDirectory(r) - if err != nil { - return nil, err - } - - eab := &eabType{ - KeyID: kid, - KeyType: "hs", - PrivateBytes: append(decodedTokenPrefix, bytes...), // we do this to avoid generating tokens that start with - - AcmeDirectory: acmeDirectory, - CreatedOn: time.Now(), - } - - sc := b.makeStorageContext(ctx, r.Storage) - err = b.acmeState.SaveEab(sc, eab) - if err != nil { - return nil, fmt.Errorf("failed saving generated eab: %w", err) - } - - encodedKey := base64.RawURLEncoding.EncodeToString(eab.PrivateBytes) - - return &logical.Response{ - Data: map[string]interface{}{ - "id": eab.KeyID, - "key_type": eab.KeyType, - "key": encodedKey, - "acme_directory": path.Join(eab.AcmeDirectory, "directory"), - "created_on": eab.CreatedOn.Format(time.RFC3339), - }, - }, nil -} - -func (b *backend) pathAcmeDeleteEab(ctx context.Context, r *logical.Request, d *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, r.Storage) - keyId := d.Get("key_id").(string) - - _, err := uuid.ParseUUID(keyId) - if err != nil { - return nil, fmt.Errorf("badly formatted key_id field") - } - - deleted, err := b.acmeState.DeleteEab(sc, keyId) - if err != nil { - return nil, fmt.Errorf("failed deleting key id: %w", err) - } - - resp := &logical.Response{} - if !deleted { - resp.AddWarning("No key id found with id: " + keyId) - } - return resp, nil -} diff --git a/builtin/logical/pki/path_acme_nonce.go b/builtin/logical/pki/path_acme_nonce.go deleted file mode 100644 index e973039a2226f..0000000000000 --- a/builtin/logical/pki/path_acme_nonce.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "fmt" - "net/http" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathAcmeNonce(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeNonce, "/new-nonce") -} - -func patternAcmeNonce(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.HeaderOperation: &framework.PathOperation{ - Callback: b.acmeWrapper(b.acmeNonceHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.acmeWrapper(b.acmeNonceHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func (b *backend) acmeNonceHandler(ctx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - nonce, _, err := b.acmeState.GetNonce() - if err != nil { - return nil, err - } - - // Header operations return 200, GET return 204. - httpStatus := http.StatusOK - if r.Operation == logical.ReadOperation { - httpStatus = http.StatusNoContent - } - - return &logical.Response{ - Headers: map[string][]string{ - "Cache-Control": {"no-store"}, - "Replay-Nonce": {nonce}, - "Link": genAcmeLinkHeader(ctx), - }, - Data: map[string]interface{}{ - logical.HTTPStatusCode: httpStatus, - // Get around Vault limitation of requiring a body set if the status is not http.StatusNoContent - // for our HEAD request responses. 
- logical.HTTPContentType: "", - }, - }, nil -} - -func genAcmeLinkHeader(ctx *acmeContext) []string { - path := fmt.Sprintf("<%s>;rel=\"index\"", ctx.baseUrl.JoinPath("directory").String()) - return []string{path} -} diff --git a/builtin/logical/pki/path_acme_order.go b/builtin/logical/pki/path_acme_order.go deleted file mode 100644 index eb2ffaf9e681e..0000000000000 --- a/builtin/logical/pki/path_acme_order.go +++ /dev/null @@ -1,1084 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "crypto/x509" - "encoding/base64" - "encoding/pem" - "fmt" - "net" - "net/http" - "sort" - "strings" - "time" - - "github.com/hashicorp/vault/sdk/helper/strutil" - - "github.com/hashicorp/vault/sdk/helper/certutil" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" - "golang.org/x/net/idna" -) - -var maxAcmeCertTTL = 90 * (24 * time.Hour) - -func pathAcmeListOrders(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeListOrders, "/orders") -} - -func pathAcmeGetOrder(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeGetOrder, "/order/"+uuidNameRegex("order_id")) -} - -func pathAcmeNewOrder(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeNewOrder, "/new-order") -} - -func pathAcmeFinalizeOrder(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeFinalizeOrder, "/order/"+uuidNameRegex("order_id")+"/finalize") -} - -func pathAcmeFetchOrderCert(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeFetchOrderCert, "/order/"+uuidNameRegex("order_id")+"/cert") -} - -func patternAcmeNewOrder(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - addFieldsForACMERequest(fields) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: 
map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.acmeAccountRequiredWrapper(b.acmeNewOrderHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func patternAcmeListOrders(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - addFieldsForACMERequest(fields) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.acmeAccountRequiredWrapper(b.acmeListOrdersHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func patternAcmeGetOrder(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - addFieldsForACMERequest(fields) - addFieldsForACMEOrder(fields) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.acmeAccountRequiredWrapper(b.acmeGetOrderHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func patternAcmeFinalizeOrder(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - addFieldsForACMERequest(fields) - addFieldsForACMEOrder(fields) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - 
Callback: b.acmeAccountRequiredWrapper(b.acmeFinalizeOrderHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func patternAcmeFetchOrderCert(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - addFieldsForACMERequest(fields) - addFieldsForACMEOrder(fields) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.acmeAccountRequiredWrapper(b.acmeFetchCertOrderHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func addFieldsForACMEOrder(fields map[string]*framework.FieldSchema) { - fields["order_id"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The ACME order identifier to fetch`, - Required: true, - } -} - -func (b *backend) acmeFetchCertOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { - orderId := fields.Get("order_id").(string) - - order, err := b.acmeState.LoadOrder(ac, uc, orderId) - if err != nil { - return nil, err - } - - if order.Status != ACMEOrderValid { - return nil, fmt.Errorf("%w: order is status %s, needs to be in valid state", ErrOrderNotReady, order.Status) - } - - if len(order.IssuerId) == 0 || len(order.CertificateSerialNumber) == 0 { - return nil, fmt.Errorf("order is missing required fields to load certificate") - } - - certEntry, err := fetchCertBySerial(ac.sc, "certs/", order.CertificateSerialNumber) - if err != nil { - return nil, fmt.Errorf("failed reading certificate %s from storage: %w", order.CertificateSerialNumber, err) - } - if 
certEntry == nil || len(certEntry.Value) == 0 { - return nil, fmt.Errorf("missing certificate %s from storage", order.CertificateSerialNumber) - } - - cert, err := x509.ParseCertificate(certEntry.Value) - if err != nil { - return nil, fmt.Errorf("failed parsing certificate %s: %w", order.CertificateSerialNumber, err) - } - - issuer, err := ac.sc.fetchIssuerById(order.IssuerId) - if err != nil { - return nil, fmt.Errorf("failed loading certificate issuer %s from storage: %w", order.IssuerId, err) - } - - allPems, err := func() ([]byte, error) { - leafPEM := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - }) - - chains := []byte(issuer.Certificate) - for _, chainVal := range issuer.CAChain { - if chainVal == issuer.Certificate { - continue - } - chains = append(chains, []byte(chainVal)...) - } - - return append(leafPEM, chains...), nil - }() - if err != nil { - return nil, fmt.Errorf("failed encoding certificate ca chain: %w", err) - } - - return &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: "application/pem-certificate-chain", - logical.HTTPStatusCode: http.StatusOK, - logical.HTTPRawBody: allPems, - }, - }, nil -} - -func (b *backend) acmeFinalizeOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}, account *acmeAccount) (*logical.Response, error) { - orderId := fields.Get("order_id").(string) - - csr, err := parseCsrFromFinalize(data) - if err != nil { - return nil, err - } - - order, err := b.acmeState.LoadOrder(ac, uc, orderId) - if err != nil { - return nil, err - } - - order.Status, err = computeOrderStatus(ac, uc, order) - if err != nil { - return nil, err - } - - if order.Status != ACMEOrderReady { - return nil, fmt.Errorf("%w: order is status %s, needs to be in ready state", ErrOrderNotReady, order.Status) - } - - now := time.Now() - if !order.Expires.IsZero() && now.After(order.Expires) { - return nil, fmt.Errorf("%w: order %s is 
expired", ErrMalformed, orderId) - } - - if err = validateCsrMatchesOrder(csr, order); err != nil { - return nil, err - } - - if err = validateCsrNotUsingAccountKey(csr, uc); err != nil { - return nil, err - } - - signedCertBundle, issuerId, err := issueCertFromCsr(ac, csr) - if err != nil { - return nil, err - } - - hyphenSerialNumber := normalizeSerialFromBigInt(signedCertBundle.Certificate.SerialNumber) - err = storeCertificate(ac.sc, signedCertBundle) - if err != nil { - return nil, err - } - - if err := b.acmeState.TrackIssuedCert(ac, order.AccountId, hyphenSerialNumber, order.OrderId); err != nil { - b.Logger().Warn("orphaned generated ACME certificate due to error saving account->cert->order reference", "serial_number", hyphenSerialNumber, "error", err) - return nil, err - } - - order.Status = ACMEOrderValid - order.CertificateSerialNumber = hyphenSerialNumber - order.CertificateExpiry = signedCertBundle.Certificate.NotAfter - order.IssuerId = issuerId - - err = b.acmeState.SaveOrder(ac, order) - if err != nil { - b.Logger().Warn("orphaned generated ACME certificate due to error saving order", "serial_number", hyphenSerialNumber, "error", err) - return nil, fmt.Errorf("failed saving updated order: %w", err) - } - - if err := b.doTrackBilling(ac.sc.Context, order.Identifiers); err != nil { - b.Logger().Error("failed to track billing for order", "order", orderId, "error", err) - err = nil - } - - return formatOrderResponse(ac, order), nil -} - -func computeOrderStatus(ac *acmeContext, uc *jwsCtx, order *acmeOrder) (ACMEOrderStatusType, error) { - // If we reached a final stage, no use computing anything else - if order.Status == ACMEOrderInvalid || order.Status == ACMEOrderValid { - return order.Status, nil - } - - // We aren't in a final state yet, check for expiry - if time.Now().After(order.Expires) { - return ACMEOrderInvalid, nil - } - - // Intermediary steps passed authorizations should short circuit us as well - if order.Status == ACMEOrderReady || 
order.Status == ACMEOrderProcessing { - return order.Status, nil - } - - // If we have no authorizations attached to the order, nothing to compute either - if len(order.AuthorizationIds) == 0 { - return ACMEOrderPending, nil - } - - anyFailed := false - allPassed := true - for _, authId := range order.AuthorizationIds { - authorization, err := ac.getAcmeState().LoadAuthorization(ac, uc, authId) - if err != nil { - return order.Status, fmt.Errorf("failed loading authorization: %s: %w", authId, err) - } - - if authorization.Status == ACMEAuthorizationPending { - allPassed = false - continue - } - - if authorization.Status != ACMEAuthorizationValid { - // Per RFC 8555 - 7.1.6. Status Changes - // The order also moves to the "invalid" state if it expires or - // one of its authorizations enters a final state other than - // "valid" ("expired", "revoked", or "deactivated"). - allPassed = false - anyFailed = true - break - } - } - - if anyFailed { - return ACMEOrderInvalid, nil - } - - if allPassed { - return ACMEOrderReady, nil - } - - // The order has not expired, no authorizations have yet to be marked as failed - // nor have we passed them all. 
- return ACMEOrderPending, nil -} - -func validateCsrNotUsingAccountKey(csr *x509.CertificateRequest, uc *jwsCtx) error { - csrKey := csr.PublicKey - userKey := uc.Key.Public().Key - - sameKey, err := certutil.ComparePublicKeysAndType(csrKey, userKey) - if err != nil { - return err - } - - if sameKey { - return fmt.Errorf("%w: certificate public key must not match account key", ErrBadCSR) - } - - return nil -} - -func validateCsrMatchesOrder(csr *x509.CertificateRequest, order *acmeOrder) error { - csrDNSIdentifiers, csrIPIdentifiers := getIdentifiersFromCSR(csr) - orderDNSIdentifiers := strutil.RemoveDuplicates(order.getIdentifierDNSValues(), true) - orderIPIdentifiers := removeDuplicatesAndSortIps(order.getIdentifierIPValues()) - - if len(orderDNSIdentifiers) == 0 && len(orderIPIdentifiers) == 0 { - return fmt.Errorf("%w: order did not include any identifiers", ErrServerInternal) - } - - if len(orderDNSIdentifiers) != len(csrDNSIdentifiers) { - return fmt.Errorf("%w: Order (%v) and CSR (%v) mismatch on number of DNS identifiers", ErrBadCSR, len(orderDNSIdentifiers), len(csrDNSIdentifiers)) - } - - if len(orderIPIdentifiers) != len(csrIPIdentifiers) { - return fmt.Errorf("%w: Order (%v) and CSR (%v) mismatch on number of IP identifiers", ErrBadCSR, len(orderIPIdentifiers), len(csrIPIdentifiers)) - } - - for i, identifier := range orderDNSIdentifiers { - if identifier != csrDNSIdentifiers[i] { - return fmt.Errorf("%w: CSR is missing order DNS identifier %s", ErrBadCSR, identifier) - } - } - - for i, identifier := range orderIPIdentifiers { - if !identifier.Equal(csrIPIdentifiers[i]) { - return fmt.Errorf("%w: CSR is missing order IP identifier %s", ErrBadCSR, identifier.String()) - } - } - - // Since we do not support NotBefore/NotAfter dates at this time no need to validate CSR/Order match. 
- - return nil -} - -func (b *backend) validateIdentifiersAgainstRole(role *roleEntry, identifiers []*ACMEIdentifier) error { - for _, identifier := range identifiers { - switch identifier.Type { - case ACMEDNSIdentifier: - data := &inputBundle{ - role: role, - req: &logical.Request{}, - apiData: &framework.FieldData{}, - } - - if validateNames(b, data, []string{identifier.OriginalValue}) != "" { - return fmt.Errorf("%w: role (%s) will not issue certificate for name %v", - ErrRejectedIdentifier, role.Name, identifier.OriginalValue) - } - case ACMEIPIdentifier: - if !role.AllowIPSANs { - return fmt.Errorf("%w: role (%s) does not allow IP sans, so cannot issue certificate for %v", - ErrRejectedIdentifier, role.Name, identifier.OriginalValue) - } - default: - return fmt.Errorf("unknown type of identifier: %v for %v", identifier.Type, identifier.OriginalValue) - } - } - - return nil -} - -func getIdentifiersFromCSR(csr *x509.CertificateRequest) ([]string, []net.IP) { - dnsIdentifiers := append([]string(nil), csr.DNSNames...) - ipIdentifiers := append([]net.IP(nil), csr.IPAddresses...) 
- - if csr.Subject.CommonName != "" { - ip := net.ParseIP(csr.Subject.CommonName) - if ip != nil { - ipIdentifiers = append(ipIdentifiers, ip) - } else { - dnsIdentifiers = append(dnsIdentifiers, csr.Subject.CommonName) - } - } - - return strutil.RemoveDuplicates(dnsIdentifiers, true), removeDuplicatesAndSortIps(ipIdentifiers) -} - -func removeDuplicatesAndSortIps(ipIdentifiers []net.IP) []net.IP { - var uniqueIpIdentifiers []net.IP - for _, ip := range ipIdentifiers { - found := false - for _, curIp := range uniqueIpIdentifiers { - if curIp.Equal(ip) { - found = true - } - } - - if !found { - uniqueIpIdentifiers = append(uniqueIpIdentifiers, ip) - } - } - - sort.Slice(uniqueIpIdentifiers, func(i, j int) bool { - return uniqueIpIdentifiers[i].String() < uniqueIpIdentifiers[j].String() - }) - return uniqueIpIdentifiers -} - -func storeCertificate(sc *storageContext, signedCertBundle *certutil.ParsedCertBundle) error { - hyphenSerialNumber := normalizeSerialFromBigInt(signedCertBundle.Certificate.SerialNumber) - key := "certs/" + hyphenSerialNumber - certsCounted := sc.Backend.certsCounted.Load() - err := sc.Storage.Put(sc.Context, &logical.StorageEntry{ - Key: key, - Value: signedCertBundle.CertificateBytes, - }) - if err != nil { - return fmt.Errorf("unable to store certificate locally: %w", err) - } - sc.Backend.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) - return nil -} - -func maybeAugmentReqDataWithSuitableCN(ac *acmeContext, csr *x509.CertificateRequest, data *framework.FieldData) { - // Role doesn't require a CN, so we don't care. - if !ac.role.RequireCN { - return - } - - // CSR contains a CN, so use that one. - if csr.Subject.CommonName != "" { - return - } - - // Choose a CN in the order wildcard -> DNS -> IP -> fail. 
- for _, name := range csr.DNSNames { - if strings.Contains(name, "*") { - data.Raw["common_name"] = name - return - } - } - if len(csr.DNSNames) > 0 { - data.Raw["common_name"] = csr.DNSNames[0] - return - } - if len(csr.IPAddresses) > 0 { - data.Raw["common_name"] = csr.IPAddresses[0].String() - return - } -} - -func issueCertFromCsr(ac *acmeContext, csr *x509.CertificateRequest) (*certutil.ParsedCertBundle, issuerID, error) { - pemBlock := &pem.Block{ - Type: "CERTIFICATE REQUEST", - Headers: nil, - Bytes: csr.Raw, - } - pemCsr := string(pem.EncodeToMemory(pemBlock)) - - data := &framework.FieldData{ - Raw: map[string]interface{}{ - "csr": pemCsr, - }, - Schema: getCsrSignVerbatimSchemaFields(), - } - - // XXX: Usability hack: by default, minimalist roles have require_cn=true, - // but some ACME clients do not provision one in the certificate as modern - // (TLS) clients are mostly verifying against server's DNS SANs. - maybeAugmentReqDataWithSuitableCN(ac, csr, data) - - signingBundle, issuerId, err := ac.sc.fetchCAInfoWithIssuer(ac.issuer.ID.String(), IssuanceUsage) - if err != nil { - return nil, "", fmt.Errorf("failed loading CA %s: %w", ac.issuer.ID.String(), err) - } - - // ACME issued cert will override the TTL values to truncate to the issuer's - // expiration if we go beyond, no matter the setting - if signingBundle.LeafNotAfterBehavior == certutil.ErrNotAfterBehavior { - signingBundle.LeafNotAfterBehavior = certutil.TruncateNotAfterBehavior - } - - input := &inputBundle{ - req: &logical.Request{}, - apiData: data, - role: ac.role, - } - - normalNotAfter, _, err := getCertificateNotAfter(ac.sc.Backend, input, signingBundle) - if err != nil { - return nil, "", fmt.Errorf("failed computing certificate TTL from role/mount: %v: %w", err, ErrMalformed) - } - - // Force a maximum 90 day TTL or lower for ACME - if time.Now().Add(maxAcmeCertTTL).Before(normalNotAfter) { - input.apiData.Raw["ttl"] = maxAcmeCertTTL - } - - if csr.PublicKeyAlgorithm == 
x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil { - return nil, "", fmt.Errorf("%w: Refusing to sign CSR with empty PublicKey", ErrBadCSR) - } - - // UseCSRValues as defined in certutil/helpers.go accepts the following - // fields off of the CSR: - // - // 1. Subject fields, - // 2. SANs, - // 3. Extensions (except for a BasicConstraint extension) - // - // Because we have stricter validation of subject parameters, and no way - // to validate or allow extensions, we do not wish to use the CSR's - // parameters for these values. If a CSR sets, e.g., an organizational - // unit, we have no way of validating this (via ACME here, without perhaps - // an external policy engine), and thus should not be setting it on our - // final issued certificate. - parsedBundle, _, err := signCert(ac.sc.Backend, input, signingBundle, false /* is_ca=false */, false /* use_csr_values */) - if err != nil { - return nil, "", fmt.Errorf("%w: refusing to sign CSR: %s", ErrBadCSR, err.Error()) - } - - if err = parsedBundle.Verify(); err != nil { - return nil, "", fmt.Errorf("verification of parsed bundle failed: %w", err) - } - - // We only allow ServerAuth key usage from ACME issued certs. 
- for _, usage := range parsedBundle.Certificate.ExtKeyUsage { - if usage != x509.ExtKeyUsageServerAuth { - return nil, "", fmt.Errorf("%w: ACME certs only allow ServerAuth key usage", ErrBadCSR) - } - } - - return parsedBundle, issuerId, err -} - -func parseCsrFromFinalize(data map[string]interface{}) (*x509.CertificateRequest, error) { - csrInterface, present := data["csr"] - if !present { - return nil, fmt.Errorf("%w: missing csr in payload", ErrMalformed) - } - - base64Csr, ok := csrInterface.(string) - if !ok { - return nil, fmt.Errorf("%w: csr in payload not the expected type: %T", ErrMalformed, csrInterface) - } - - derCsr, err := base64.RawURLEncoding.DecodeString(base64Csr) - if err != nil { - return nil, fmt.Errorf("%w: failed base64 decoding csr: %s", ErrMalformed, err.Error()) - } - - csr, err := x509.ParseCertificateRequest(derCsr) - if err != nil { - return nil, fmt.Errorf("%w: failed to parse csr: %s", ErrMalformed, err.Error()) - } - - if csr.PublicKey == nil || csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm { - return nil, fmt.Errorf("%w: failed to parse csr no public key info or unknown key algorithm used", ErrBadCSR) - } - - for _, ext := range csr.Extensions { - if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) { - isCa, _, err := certutil.ParseBasicConstraintExtension(ext) - if err != nil { - return nil, fmt.Errorf("%w: refusing to accept CSR with Basic Constraints extension: %v", ErrBadCSR, err.Error()) - } - - if isCa { - return nil, fmt.Errorf("%w: refusing to accept CSR with Basic Constraints extension with CA set to true", ErrBadCSR) - } - } - } - - return csr, nil -} - -func (b *backend) acmeGetOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, _ map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { - orderId := fields.Get("order_id").(string) - - order, err := b.acmeState.LoadOrder(ac, uc, orderId) - if err != nil { - return nil, err - } - - order.Status, err = 
computeOrderStatus(ac, uc, order) - if err != nil { - return nil, err - } - - // Per RFC 8555 -> 7.1.3. Order Objects - // For final orders (in the "valid" or "invalid" state), the authorizations that were completed. - // - // Otherwise, for "pending" orders we will return our list as it was originally saved. - requiresFiltering := order.Status == ACMEOrderValid || order.Status == ACMEOrderInvalid - if requiresFiltering { - filteredAuthorizationIds := []string{} - - for _, authId := range order.AuthorizationIds { - authorization, err := b.acmeState.LoadAuthorization(ac, uc, authId) - if err != nil { - return nil, err - } - - if (order.Status == ACMEOrderInvalid || order.Status == ACMEOrderValid) && - authorization.Status == ACMEAuthorizationValid { - filteredAuthorizationIds = append(filteredAuthorizationIds, authId) - } - } - - order.AuthorizationIds = filteredAuthorizationIds - } - - return formatOrderResponse(ac, order), nil -} - -func (b *backend) acmeListOrdersHandler(ac *acmeContext, _ *logical.Request, _ *framework.FieldData, uc *jwsCtx, _ map[string]interface{}, acct *acmeAccount) (*logical.Response, error) { - orderIds, err := b.acmeState.ListOrderIds(ac, acct.KeyId) - if err != nil { - return nil, err - } - - orderUrls := []string{} - for _, orderId := range orderIds { - order, err := b.acmeState.LoadOrder(ac, uc, orderId) - if err != nil { - return nil, err - } - - if order.Status == ACMEOrderInvalid { - // Per RFC8555 -> 7.1.2.1 - Orders List - // The server SHOULD include pending orders and SHOULD NOT - // include orders that are invalid in the array of URLs. 
- continue - } - - orderUrls = append(orderUrls, buildOrderUrl(ac, orderId)) - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "orders": orderUrls, - }, - } - - return resp, nil -} - -func (b *backend) acmeNewOrderHandler(ac *acmeContext, _ *logical.Request, _ *framework.FieldData, _ *jwsCtx, data map[string]interface{}, account *acmeAccount) (*logical.Response, error) { - identifiers, err := parseOrderIdentifiers(data) - if err != nil { - return nil, err - } - - notBefore, err := parseOptRFC3339Field(data, "notBefore") - if err != nil { - return nil, err - } - - notAfter, err := parseOptRFC3339Field(data, "notAfter") - if err != nil { - return nil, err - } - - if !notBefore.IsZero() || !notAfter.IsZero() { - return nil, fmt.Errorf("%w: NotBefore and NotAfter are not supported", ErrMalformed) - } - - err = validateAcmeProvidedOrderDates(notBefore, notAfter) - if err != nil { - return nil, err - } - - err = b.validateIdentifiersAgainstRole(ac.role, identifiers) - if err != nil { - return nil, err - } - - // Per RFC 8555 -> 7.1.3. Order Objects - // For pending orders, the authorizations that the client needs to complete before the - // requested certificate can be issued (see Section 7.5), including - // unexpired authorizations that the client has completed in the past - // for identifiers specified in the order. - // - // Since we are generating all authorizations here, there is no need to filter them out - // IF/WHEN we support pre-authz workflows and associate existing authorizations to this - // order they will need filtering. 
- var authorizations []*ACMEAuthorization - var authorizationIds []string - for _, identifier := range identifiers { - authz, err := generateAuthorization(account, identifier) - if err != nil { - return nil, fmt.Errorf("error generating authorizations: %w", err) - } - authorizations = append(authorizations, authz) - - err = b.acmeState.SaveAuthorization(ac, authz) - if err != nil { - return nil, fmt.Errorf("failed storing authorization: %w", err) - } - - authorizationIds = append(authorizationIds, authz.Id) - } - - order := &acmeOrder{ - OrderId: genUuid(), - AccountId: account.KeyId, - Status: ACMEOrderPending, - Expires: time.Now().Add(24 * time.Hour), // TODO: Readjust this based on authz and/or config - Identifiers: identifiers, - AuthorizationIds: authorizationIds, - } - - err = b.acmeState.SaveOrder(ac, order) - if err != nil { - return nil, fmt.Errorf("failed storing order: %w", err) - } - - resp := formatOrderResponse(ac, order) - - // Per RFC 8555 Section 7.4. Applying for Certificate Issuance: - // - // > If the server is willing to issue the requested certificate, it - // > responds with a 201 (Created) response. 
- resp.Data[logical.HTTPStatusCode] = http.StatusCreated - return resp, nil -} - -func validateAcmeProvidedOrderDates(notBefore time.Time, notAfter time.Time) error { - if !notBefore.IsZero() && !notAfter.IsZero() { - if notBefore.Equal(notAfter) { - return fmt.Errorf("%w: provided notBefore and notAfter dates can not be equal", ErrMalformed) - } - - if notBefore.After(notAfter) { - return fmt.Errorf("%w: provided notBefore can not be greater than notAfter", ErrMalformed) - } - } - - if !notAfter.IsZero() { - if time.Now().After(notAfter) { - return fmt.Errorf("%w: provided notAfter can not be in the past", ErrMalformed) - } - } - - return nil -} - -func formatOrderResponse(acmeCtx *acmeContext, order *acmeOrder) *logical.Response { - baseOrderUrl := buildOrderUrl(acmeCtx, order.OrderId) - - var authorizationUrls []string - for _, authId := range order.AuthorizationIds { - authorizationUrls = append(authorizationUrls, buildAuthorizationUrl(acmeCtx, authId)) - } - - var identifiers []map[string]interface{} - for _, identifier := range order.Identifiers { - identifiers = append(identifiers, identifier.NetworkMarshal( /* use original value */ true)) - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "status": order.Status, - "expires": order.Expires.Format(time.RFC3339), - "identifiers": identifiers, - "authorizations": authorizationUrls, - "finalize": baseOrderUrl + "/finalize", - }, - Headers: map[string][]string{ - "Location": {baseOrderUrl}, - }, - } - - // Only reply with the certificate URL if we are in a valid order state. 
- if order.Status == ACMEOrderValid { - resp.Data["certificate"] = baseOrderUrl + "/cert" - } - - return resp -} - -func buildAuthorizationUrl(acmeCtx *acmeContext, authId string) string { - return acmeCtx.baseUrl.JoinPath("authorization", authId).String() -} - -func buildOrderUrl(acmeCtx *acmeContext, orderId string) string { - return acmeCtx.baseUrl.JoinPath("order", orderId).String() -} - -func generateAuthorization(acct *acmeAccount, identifier *ACMEIdentifier) (*ACMEAuthorization, error) { - authId := genUuid() - - // Certain challenges have certain restrictions: DNS challenges cannot - // be used to validate IP addresses, and only DNS challenges can be used - // to validate wildcards. - allowedChallenges := []ACMEChallengeType{ACMEHTTPChallenge, ACMEDNSChallenge, ACMEALPNChallenge} - if identifier.Type == ACMEIPIdentifier { - allowedChallenges = []ACMEChallengeType{ACMEHTTPChallenge} - } else if identifier.IsWildcard { - allowedChallenges = []ACMEChallengeType{ACMEDNSChallenge} - } - - var challenges []*ACMEChallenge - for _, challengeType := range allowedChallenges { - token, err := getACMEToken() - if err != nil { - return nil, err - } - - challenge := &ACMEChallenge{ - Type: challengeType, - Status: ACMEChallengePending, - ChallengeFields: map[string]interface{}{ - "token": token, - }, - } - - challenges = append(challenges, challenge) - } - - return &ACMEAuthorization{ - Id: authId, - AccountId: acct.KeyId, - Identifier: identifier, - Status: ACMEAuthorizationPending, - Expires: "", // only populated when it switches to valid. 
- Challenges: challenges, - Wildcard: identifier.IsWildcard, - }, nil -} - -func parseOptRFC3339Field(data map[string]interface{}, keyName string) (time.Time, error) { - var timeVal time.Time - var err error - - rawBefore, present := data[keyName] - if present { - beforeStr, ok := rawBefore.(string) - if !ok { - return timeVal, fmt.Errorf("invalid type (%T) for field '%s': %w", rawBefore, keyName, ErrMalformed) - } - timeVal, err = time.Parse(time.RFC3339, beforeStr) - if err != nil { - return timeVal, fmt.Errorf("failed parsing field '%s' (%s): %s: %w", keyName, rawBefore, err.Error(), ErrMalformed) - } - - if timeVal.IsZero() { - return timeVal, fmt.Errorf("provided time value is invalid '%s' (%s): %w", keyName, rawBefore, ErrMalformed) - } - } - - return timeVal, nil -} - -func parseOrderIdentifiers(data map[string]interface{}) ([]*ACMEIdentifier, error) { - rawIdentifiers, present := data["identifiers"] - if !present { - return nil, fmt.Errorf("missing required identifiers argument: %w", ErrMalformed) - } - - listIdentifiers, ok := rawIdentifiers.([]interface{}) - if !ok { - return nil, fmt.Errorf("invalid type (%T) for field 'identifiers': %w", rawIdentifiers, ErrMalformed) - } - - var identifiers []*ACMEIdentifier - for _, rawIdentifier := range listIdentifiers { - mapIdentifier, ok := rawIdentifier.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("invalid type (%T) for value in 'identifiers': %w", rawIdentifier, ErrMalformed) - } - - typeVal, present := mapIdentifier["type"] - if !present { - return nil, fmt.Errorf("missing type argument for value in 'identifiers': %w", ErrMalformed) - } - typeStr, ok := typeVal.(string) - if !ok { - return nil, fmt.Errorf("invalid type for type argument (%T) for value in 'identifiers': %w", typeStr, ErrMalformed) - } - - valueVal, present := mapIdentifier["value"] - if !present { - return nil, fmt.Errorf("missing value argument for value in 'identifiers': %w", ErrMalformed) - } - valueStr, ok := 
valueVal.(string) - if !ok { - return nil, fmt.Errorf("invalid type for value argument (%T) for value in 'identifiers': %w", valueStr, ErrMalformed) - } - - if len(valueStr) == 0 { - return nil, fmt.Errorf("value argument for value in 'identifiers' can not be blank: %w", ErrMalformed) - } - - identifier := &ACMEIdentifier{ - Value: valueStr, - OriginalValue: valueStr, - } - - switch typeStr { - case string(ACMEIPIdentifier): - identifier.Type = ACMEIPIdentifier - ip := net.ParseIP(valueStr) - if ip == nil { - return nil, fmt.Errorf("value argument (%s) failed validation: failed parsing as IP: %w", valueStr, ErrMalformed) - } - case string(ACMEDNSIdentifier): - identifier.Type = ACMEDNSIdentifier - - // This check modifies the identifier if it is a wildcard, - // removing the non-wildcard portion. We do this before the - // IP address checks, in case of an attempt to bypass the IP/DNS - // check via including a leading wildcard (e.g., *.127.0.0.1). - // - // Per RFC 8555 Section 7.1.4. Authorization Objects: - // - // > Wildcard domain names (with "*" as the first label) MUST NOT - // > be included in authorization objects. - if _, _, err := identifier.MaybeParseWildcard(); err != nil { - return nil, fmt.Errorf("value argument (%s) failed validation: invalid wildcard: %v: %w", valueStr, err, ErrMalformed) - } - - if isIP := net.ParseIP(identifier.Value); isIP != nil { - return nil, fmt.Errorf("refusing to accept argument (%s) as DNS type identifier: parsed OK as IP address: %w", valueStr, ErrMalformed) - } - - // Use the reduced (identifier.Value) in case this was a wildcard - // domain. - p := idna.New(idna.ValidateForRegistration()) - converted, err := p.ToASCII(identifier.Value) - if err != nil { - return nil, fmt.Errorf("value argument (%s) failed validation: %s: %w", valueStr, err.Error(), ErrMalformed) - } - - // Per RFC 8555 Section 7.1.4. 
Authorization Objects: - // - // > The domain name MUST be encoded in the form in which it - // > would appear in a certificate. That is, it MUST be encoded - // > according to the rules in Section 7 of [RFC5280]. Servers - // > MUST verify any identifier values that begin with the - // > ASCII-Compatible Encoding prefix "xn--" as defined in - // > [RFC5890] are properly encoded. - if identifier.Value != converted { - return nil, fmt.Errorf("value argument (%s) failed IDNA round-tripping to ASCII: %w", valueStr, ErrMalformed) - } - default: - return nil, fmt.Errorf("unsupported identifier type %s: %w", typeStr, ErrUnsupportedIdentifier) - } - - identifiers = append(identifiers, identifier) - } - - return identifiers, nil -} - -func (b *backend) acmeTidyOrder(ac *acmeContext, accountId string, orderPath string, certTidyBuffer time.Duration) (bool, time.Time, error) { - // First we get the order; note that the orderPath includes the account - // It's only accessed at acme/orders/ with the account context - // It's saved at acme//orders/ - entry, err := ac.sc.Storage.Get(ac.sc.Context, orderPath) - if err != nil { - return false, time.Time{}, fmt.Errorf("error loading order: %w", err) - } - if entry == nil { - return false, time.Time{}, fmt.Errorf("order does not exist: %w", ErrMalformed) - } - var order acmeOrder - err = entry.DecodeJSON(&order) - if err != nil { - return false, time.Time{}, fmt.Errorf("error decoding order: %w", err) - } - - // Determine whether we should tidy this order - shouldTidy := false - - // Track either the order expiry or certificate expiry to return to the caller, this - // can be used to influence the account's expiry - orderExpiry := order.CertificateExpiry - - // It is faster to check certificate information on the order entry rather than fetch the cert entry to parse: - if !order.CertificateExpiry.IsZero() { - // This implies that a certificate exists - // When a certificate exists, we want to expire and tidy the order when we tidy 
the certificate: - if time.Now().After(order.CertificateExpiry.Add(certTidyBuffer)) { // It's time to clean - shouldTidy = true - } - } else { - // This implies that no certificate exists - // In this case, we want to expire the order after it has expired (+ some safety buffer) - if time.Now().After(order.Expires) { - shouldTidy = true - } - orderExpiry = order.Expires - } - if shouldTidy == false { - return shouldTidy, orderExpiry, nil - } - - // Tidy this Order - // That includes any certificate acme//orders/orderPath/cert - // That also includes any related authorizations: acme//authorizations/ - - // First Authorizations - for _, authorizationId := range order.AuthorizationIds { - err = ac.sc.Storage.Delete(ac.sc.Context, getAuthorizationPath(accountId, authorizationId)) - if err != nil { - return false, orderExpiry, err - } - } - - // Normal Tidy will Take Care of the Certificate, we need to clean up the certificate to account tracker though - err = ac.sc.Storage.Delete(ac.sc.Context, getAcmeSerialToAccountTrackerPath(accountId, order.CertificateSerialNumber)) - if err != nil { - return false, orderExpiry, err - } - - // And Finally, the order: - err = ac.sc.Storage.Delete(ac.sc.Context, orderPath) - if err != nil { - return false, orderExpiry, err - } - b.tidyStatusIncDelAcmeOrderCount() - - return true, orderExpiry, nil -} diff --git a/builtin/logical/pki/path_acme_order_test.go b/builtin/logical/pki/path_acme_order_test.go deleted file mode 100644 index 5340bbd31caca..0000000000000 --- a/builtin/logical/pki/path_acme_order_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "net" - "testing" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" - "github.com/stretchr/testify/require" -) - -// TestACME_ValidateIdentifiersAgainstRole Verify the ACME order creation -// function verifies somewhat the identifiers that were provided have a -// decent chance of being allowed by the selected role. -func TestACME_ValidateIdentifiersAgainstRole(t *testing.T) { - b, _ := CreateBackendWithStorage(t) - - tests := []struct { - name string - role *roleEntry - identifiers []*ACMEIdentifier - expectErr bool - }{ - { - name: "verbatim-role-allows-dns-ip", - role: buildSignVerbatimRoleWithNoData(nil), - identifiers: _buildACMEIdentifiers("test.com", "127.0.0.1"), - expectErr: false, - }, - { - name: "default-role-does-not-allow-dns", - role: buildTestRole(t, nil), - identifiers: _buildACMEIdentifiers("www.test.com"), - expectErr: true, - }, - { - name: "default-role-allows-ip", - role: buildTestRole(t, nil), - identifiers: _buildACMEIdentifiers("192.168.0.1"), - expectErr: false, - }, - { - name: "disable-ip-sans-forbids-ip", - role: buildTestRole(t, map[string]interface{}{"allow_ip_sans": false}), - identifiers: _buildACMEIdentifiers("192.168.0.1"), - expectErr: true, - }, - { - name: "role-no-wildcards-allowed-without", - role: buildTestRole(t, map[string]interface{}{ - "allow_subdomains": true, - "allow_bare_domains": true, - "allowed_domains": []string{"test.com"}, - "allow_wildcard_certificates": false, - }), - identifiers: _buildACMEIdentifiers("www.test.com", "test.com"), - expectErr: false, - }, - { - name: "role-no-wildcards-allowed-with-wildcard", - role: buildTestRole(t, map[string]interface{}{ - "allow_subdomains": true, - "allowed_domains": []string{"test.com"}, - "allow_wildcard_certificates": false, - }), - identifiers: _buildACMEIdentifiers("*.test.com"), - expectErr: true, - }, - { - name: "role-wildcards-allowed-with-wildcard", - role: 
buildTestRole(t, map[string]interface{}{ - "allow_subdomains": true, - "allowed_domains": []string{"test.com"}, - "allow_wildcard_certificates": true, - }), - identifiers: _buildACMEIdentifiers("*.test.com"), - expectErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := b.validateIdentifiersAgainstRole(tt.role, tt.identifiers) - - if tt.expectErr { - require.Error(t, err, "validateIdentifiersAgainstRole(%v, %v)", tt.role.ToResponseData(), tt.identifiers) - // If we did return an error if should be classified as a ErrRejectedIdentifier - require.ErrorIs(t, err, ErrRejectedIdentifier) - } else { - require.NoError(t, err, "validateIdentifiersAgainstRole(%v, %v)", tt.role.ToResponseData(), tt.identifiers) - } - }) - } -} - -func _buildACMEIdentifiers(values ...string) []*ACMEIdentifier { - var identifiers []*ACMEIdentifier - - for _, value := range values { - identifiers = append(identifiers, _buildACMEIdentifier(value)) - } - - return identifiers -} - -func _buildACMEIdentifier(val string) *ACMEIdentifier { - ip := net.ParseIP(val) - if ip == nil { - identifier := &ACMEIdentifier{Type: "dns", Value: val, OriginalValue: val, IsWildcard: false} - _, _, _ = identifier.MaybeParseWildcard() - return identifier - } - - return &ACMEIdentifier{Type: "ip", Value: val, OriginalValue: val, IsWildcard: false} -} - -// Easily allow tests to create valid roles with proper defaults, since we don't have an easy -// way to generate roles with proper defaults, go through the createRole handler with the handlers -// field data so we pickup all the defaults specified there. 
-func buildTestRole(t *testing.T, config map[string]interface{}) *roleEntry { - b, s := CreateBackendWithStorage(t) - - path := pathRoles(b) - fields := path.Fields - if config == nil { - config = map[string]interface{}{} - } - - if _, exists := config["name"]; !exists { - config["name"] = genUuid() - } - - _, err := b.pathRoleCreate(ctx, &logical.Request{Storage: s}, &framework.FieldData{Raw: config, Schema: fields}) - require.NoError(t, err, "failed generating role with config %v", config) - - role, err := b.getRole(ctx, s, config["name"].(string)) - require.NoError(t, err, "failed loading stored role") - - return role -} diff --git a/builtin/logical/pki/path_acme_revoke.go b/builtin/logical/pki/path_acme_revoke.go deleted file mode 100644 index 9a71f1cd91100..0000000000000 --- a/builtin/logical/pki/path_acme_revoke.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "bytes" - "crypto" - "crypto/x509" - "encoding/base64" - "fmt" - "time" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathAcmeRevoke(b *backend) []*framework.Path { - return buildAcmeFrameworkPaths(b, patternAcmeRevoke, "/revoke-cert") -} - -func patternAcmeRevoke(b *backend, pattern string) *framework.Path { - fields := map[string]*framework.FieldSchema{} - addFieldsForACMEPath(fields, pattern) - addFieldsForACMERequest(fields) - - return &framework.Path{ - Pattern: pattern, - Fields: fields, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.acmeParsedWrapper(b.acmeRevocationHandler), - ForwardPerformanceSecondary: false, - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathAcmeHelpSync, - HelpDescription: pathAcmeHelpDesc, - } -} - -func (b *backend) acmeRevocationHandler(acmeCtx *acmeContext, _ *logical.Request, _ *framework.FieldData, userCtx *jwsCtx, data 
map[string]interface{}) (*logical.Response, error) { - var cert *x509.Certificate - - rawCertificate, present := data["certificate"] - if present { - certBase64, ok := rawCertificate.(string) - if !ok { - return nil, fmt.Errorf("invalid type (%T; expected string) for field 'certificate': %w", rawCertificate, ErrMalformed) - } - - certBytes, err := base64.RawURLEncoding.DecodeString(certBase64) - if err != nil { - return nil, fmt.Errorf("failed to base64 decode certificate: %v: %w", err, ErrMalformed) - } - - cert, err = x509.ParseCertificate(certBytes) - if err != nil { - return nil, fmt.Errorf("failed to parse certificate: %v: %w", err, ErrMalformed) - } - } else { - return nil, fmt.Errorf("bad request was lacking required field 'certificate': %w", ErrMalformed) - } - - rawReason, present := data["reason"] - if present { - reason, ok := rawReason.(float64) - if !ok { - return nil, fmt.Errorf("invalid type (%T; expected float64) for field 'reason': %w", rawReason, ErrMalformed) - } - - if int(reason) != 0 { - return nil, fmt.Errorf("Vault does not support revocation reasons (got %v; expected omitted or 0/unspecified): %w", int(reason), ErrBadRevocationReason) - } - } - - // If the certificate expired, there's no point in revoking it. - if cert.NotAfter.Before(time.Now()) { - return nil, fmt.Errorf("refusing to revoke expired certificate: %w", ErrMalformed) - } - - // Fetch the CRL config as we need it to ultimately do the - // revocation. This should be cached and thus relatively fast. - config, err := b.crlBuilder.getConfigWithUpdate(acmeCtx.sc) - if err != nil { - return nil, fmt.Errorf("unable to revoke certificate: failed reading revocation config: %v: %w", err, ErrServerInternal) - } - - // Load our certificate from storage to ensure it exists and matches - // what was given to us. 
- serial := serialFromCert(cert) - certEntry, err := fetchCertBySerial(acmeCtx.sc, "certs/", serial) - if err != nil { - return nil, fmt.Errorf("unable to revoke certificate: err reading global cert entry: %v: %w", err, ErrServerInternal) - } - if certEntry == nil { - return nil, fmt.Errorf("unable to revoke certificate: no global cert entry found: %w", ErrServerInternal) - } - - // Validate that the provided certificate matches the stored - // certificate. This completes the chain of: - // - // provided_auth -> provided_cert == stored cert. - // - // Allowing revocation to be safe. - // - // We use the non-subtle unsafe bytes equality check here as we have - // already fetched this certificate from storage, thus already leaking - // timing information that this cert exists. The user could thus simply - // fetch the cert from Vault matching this serial number via the unauthed - // pki/certs/:serial API endpoint. - if !bytes.Equal(certEntry.Value, cert.Raw) { - return nil, fmt.Errorf("unable to revoke certificate: supplied certificate does not match CA's stored value: %w", ErrMalformed) - } - - // Check if it was already revoked; in this case, we do not need to - // revoke it again and want to respond with an appropriate error message. - revEntry, err := fetchCertBySerial(acmeCtx.sc, "revoked/", serial) - if err != nil { - return nil, fmt.Errorf("unable to revoke certificate: err reading revocation entry: %v: %w", err, ErrServerInternal) - } - if revEntry != nil { - return nil, fmt.Errorf("unable to revoke certificate: %w", ErrAlreadyRevoked) - } - - // Finally, do the relevant permissions/authorization check as - // appropriate based on the type of revocation happening. 
- if !userCtx.Existing { - return b.acmeRevocationByPoP(acmeCtx, userCtx, cert, config) - } - - return b.acmeRevocationByAccount(acmeCtx, userCtx, cert, config) -} - -func (b *backend) acmeRevocationByPoP(acmeCtx *acmeContext, userCtx *jwsCtx, cert *x509.Certificate, config *crlConfig) (*logical.Response, error) { - // Since this account does not exist, ensure we've gotten a private key - // matching the certificate's public key. - signer, ok := userCtx.Key.Key.(crypto.Signer) - if !ok { - return nil, fmt.Errorf("unable to revoke certificate: unable to parse JWS key of type (%T): %w", userCtx.Key.Key, ErrMalformed) - } - - // Ensure that our PoP is indeed valid. - if err := validatePrivateKeyMatchesCert(signer, cert); err != nil { - return nil, fmt.Errorf("unable to revoke certificate: unable to verify proof of possession: %v: %w", err, ErrMalformed) - } - - // Now it is safe to revoke. - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() - - return revokeCert(acmeCtx.sc, config, cert) -} - -func (b *backend) acmeRevocationByAccount(acmeCtx *acmeContext, userCtx *jwsCtx, cert *x509.Certificate, config *crlConfig) (*logical.Response, error) { - // Fetch the account; disallow revocations from non-valid-status accounts. - _, err := requireValidAcmeAccount(acmeCtx, userCtx) - if err != nil { - return nil, fmt.Errorf("failed to lookup account: %w", err) - } - - // We only support certificates issued by this user, we don't support - // cross-account revocations. - serial := serialFromCert(cert) - acmeEntry, err := b.acmeState.GetIssuedCert(acmeCtx, userCtx.Kid, serial) - if err != nil || acmeEntry == nil { - return nil, fmt.Errorf("unable to revoke certificate: %v: %w", err, ErrMalformed) - } - - // Now it is safe to revoke. 
- b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() - - return revokeCert(acmeCtx.sc, config, cert) -} diff --git a/builtin/logical/pki/path_acme_test.go b/builtin/logical/pki/path_acme_test.go deleted file mode 100644 index aed906863466f..0000000000000 --- a/builtin/logical/pki/path_acme_test.go +++ /dev/null @@ -1,1578 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "os" - "path" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/sdk/helper/certutil" - - "github.com/go-test/deep" - "github.com/stretchr/testify/require" - "golang.org/x/crypto/acme" - "golang.org/x/net/http2" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/logical/pki/dnstest" - "github.com/hashicorp/vault/helper/constants" - "github.com/hashicorp/vault/helper/testhelpers" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" -) - -// TestAcmeBasicWorkflow a test that will validate a basic ACME workflow using the Golang ACME client. 
-func TestAcmeBasicWorkflow(t *testing.T) { - t.Parallel() - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - cases := []struct { - name string - prefixUrl string - }{ - {"root", "acme/"}, - {"role", "roles/test-role/acme/"}, - {"issuer", "issuer/int-ca/acme/"}, - {"issuer_role", "issuer/int-ca/roles/test-role/acme/"}, - } - testCtx := context.Background() - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - baseAcmeURL := "/v1/pki/" + tc.prefixUrl - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa key") - - acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) - - t.Logf("Testing discover on %s", baseAcmeURL) - discovery, err := acmeClient.Discover(testCtx) - require.NoError(t, err, "failed acme discovery call") - - discoveryBaseUrl := client.Address() + baseAcmeURL - require.Equal(t, discoveryBaseUrl+"new-nonce", discovery.NonceURL) - require.Equal(t, discoveryBaseUrl+"new-account", discovery.RegURL) - require.Equal(t, discoveryBaseUrl+"new-order", discovery.OrderURL) - require.Equal(t, discoveryBaseUrl+"revoke-cert", discovery.RevokeURL) - require.Equal(t, discoveryBaseUrl+"key-change", discovery.KeyChangeURL) - require.False(t, discovery.ExternalAccountRequired, "bad value for external account required in directory") - - // Attempt to update prior to creating an account - t.Logf("Testing updates with no proper account fail on %s", baseAcmeURL) - _, err = acmeClient.UpdateReg(testCtx, &acme.Account{Contact: []string{"mailto:shouldfail@example.com"}}) - require.ErrorIs(t, err, acme.ErrNoAccount, "expected failure attempting to update prior to account registration") - - // Create new account - t.Logf("Testing register on %s", baseAcmeURL) - acct, err := acmeClient.Register(testCtx, &acme.Account{ - Contact: []string{"mailto:test@example.com", "mailto:test2@test.com"}, - }, func(tosURL string) bool { return true }) - require.NoError(t, err, "failed 
registering account") - require.Equal(t, acme.StatusValid, acct.Status) - require.Contains(t, acct.Contact, "mailto:test@example.com") - require.Contains(t, acct.Contact, "mailto:test2@test.com") - require.Len(t, acct.Contact, 2) - - // Call register again we should get existing account - t.Logf("Testing duplicate register returns existing account on %s", baseAcmeURL) - _, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true }) - require.ErrorIs(t, err, acme.ErrAccountAlreadyExists, - "We should have returned a 200 status code which would have triggered an error in the golang acme"+ - " library") - - // Update contact - t.Logf("Testing Update account contacts on %s", baseAcmeURL) - acct.Contact = []string{"mailto:test3@example.com"} - acct2, err := acmeClient.UpdateReg(testCtx, acct) - require.NoError(t, err, "failed updating account") - require.Equal(t, acme.StatusValid, acct2.Status) - // We should get this back, not the original values. - require.Contains(t, acct2.Contact, "mailto:test3@example.com") - require.Len(t, acct2.Contact, 1) - - // Make sure order's do not accept dates - _, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}}, - acme.WithOrderNotBefore(time.Now().Add(10*time.Minute))) - require.Error(t, err, "should have rejected a new order with NotBefore set") - - _, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}}, - acme.WithOrderNotAfter(time.Now().Add(10*time.Minute))) - require.Error(t, err, "should have rejected a new order with NotAfter set") - - // Make sure DNS identifiers cannot include IP addresses - _, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "127.0.0.1"}}, - acme.WithOrderNotAfter(time.Now().Add(10*time.Minute))) - require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier") - _, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: 
"*.127.0.0.1"}}, - acme.WithOrderNotAfter(time.Now().Add(10*time.Minute))) - require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier") - - // Create an order - t.Logf("Testing Authorize Order on %s", baseAcmeURL) - identifiers := []string{"localhost.localdomain", "*.localdomain"} - createOrder, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ - {Type: "dns", Value: identifiers[0]}, - {Type: "dns", Value: identifiers[1]}, - }) - require.NoError(t, err, "failed creating order") - require.Equal(t, acme.StatusPending, createOrder.Status) - require.Empty(t, createOrder.CertURL) - require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL) - require.Len(t, createOrder.AuthzURLs, 2, "expected two authzurls") - - // Get order - t.Logf("Testing GetOrder on %s", baseAcmeURL) - getOrder, err := acmeClient.GetOrder(testCtx, createOrder.URI) - require.NoError(t, err, "failed fetching order") - require.Equal(t, acme.StatusPending, createOrder.Status) - if diffs := deep.Equal(createOrder, getOrder); diffs != nil { - t.Fatalf("Differences exist between create and get order: \n%v", strings.Join(diffs, "\n")) - } - - // Make sure the identifiers returned in the order contain the original values - var ids []string - for _, id := range getOrder.Identifiers { - require.Equal(t, "dns", id.Type) - ids = append(ids, id.Value) - } - require.ElementsMatch(t, identifiers, ids, "order responses should have all original identifiers") - - // Load authorizations - var authorizations []*acme.Authorization - for _, authUrl := range getOrder.AuthzURLs { - auth, err := acmeClient.GetAuthorization(testCtx, authUrl) - require.NoError(t, err, "failed fetching authorization: %s", authUrl) - - authorizations = append(authorizations, auth) - } - - // We should have 2 separate auth challenges as we have two separate identifier - require.Len(t, authorizations, 2, "expected 2 authorizations in order") - - var wildcardAuth *acme.Authorization - var 
domainAuth *acme.Authorization - for _, auth := range authorizations { - if auth.Wildcard { - wildcardAuth = auth - } else { - domainAuth = auth - } - } - - // Test the values for the domain authentication - require.Equal(t, acme.StatusPending, domainAuth.Status) - require.Equal(t, "dns", domainAuth.Identifier.Type) - require.Equal(t, "localhost.localdomain", domainAuth.Identifier.Value) - require.False(t, domainAuth.Wildcard, "should not be a wildcard") - require.True(t, domainAuth.Expires.IsZero(), "authorization should only have expiry set on valid status") - - require.Len(t, domainAuth.Challenges, 3, "expected three challenges") - require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status) - require.True(t, domainAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge") - require.Equal(t, "http-01", domainAuth.Challenges[0].Type) - require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token") - require.Equal(t, acme.StatusPending, domainAuth.Challenges[1].Status) - require.True(t, domainAuth.Challenges[1].Validated.IsZero(), "validated time should be 0 on challenge") - require.Equal(t, "dns-01", domainAuth.Challenges[1].Type) - require.NotEmpty(t, domainAuth.Challenges[1].Token, "missing challenge token") - require.Equal(t, acme.StatusPending, domainAuth.Challenges[2].Status) - require.True(t, domainAuth.Challenges[2].Validated.IsZero(), "validated time should be 0 on challenge") - require.Equal(t, "tls-alpn-01", domainAuth.Challenges[2].Type) - require.NotEmpty(t, domainAuth.Challenges[2].Token, "missing challenge token") - - // Test the values for the wildcard authentication - require.Equal(t, acme.StatusPending, wildcardAuth.Status) - require.Equal(t, "dns", wildcardAuth.Identifier.Type) - require.Equal(t, "localdomain", wildcardAuth.Identifier.Value) // Make sure we strip the *. 
in auth responses - require.True(t, wildcardAuth.Wildcard, "should be a wildcard") - require.True(t, wildcardAuth.Expires.IsZero(), "authorization should only have expiry set on valid status") - - require.Len(t, wildcardAuth.Challenges, 1, "expected one challenge") - require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status) - require.True(t, wildcardAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge") - require.Equal(t, "dns-01", wildcardAuth.Challenges[0].Type) - require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token") - - // Make sure that getting a challenge does not start it. - challenge, err := acmeClient.GetChallenge(testCtx, domainAuth.Challenges[0].URI) - require.NoError(t, err, "failed to load challenge") - require.Equal(t, acme.StatusPending, challenge.Status) - require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge") - require.Equal(t, "http-01", challenge.Type) - - // Accept a challenge; this triggers validation to start. - challenge, err = acmeClient.Accept(testCtx, domainAuth.Challenges[0]) - require.NoError(t, err, "failed to load challenge") - require.Equal(t, acme.StatusProcessing, challenge.Status) - require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge") - require.Equal(t, "http-01", challenge.Type) - - require.NotEmpty(t, challenge.Token, "missing challenge token") - - // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow - // test. - markAuthorizationSuccess(t, client, acmeClient, acct, getOrder) - - // Make sure sending a CSR with the account key gets rejected. - goodCr := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: identifiers[1]}, - DNSNames: []string{identifiers[0], identifiers[1]}, - } - t.Logf("csr: %v", goodCr) - - // We want to make sure people are not using the same keys for CSR/Certs and their ACME account. 
- csrSignedWithAccountKey, err := x509.CreateCertificateRequest(rand.Reader, goodCr, accountKey) - require.NoError(t, err, "failed generating csr") - _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrSignedWithAccountKey, true) - require.Error(t, err, "should not be allowed to use the account key for a CSR") - - csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generated key for CSR") - - // Validate we reject CSRs that contain CN that aren't in the original order - badCr := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: "not-in-original-order.com"}, - DNSNames: []string{identifiers[0], identifiers[1]}, - } - t.Logf("csr: %v", badCr) - - csrWithBadCName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey) - require.NoError(t, err, "failed generating csr with bad common name") - - _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadCName, true) - require.Error(t, err, "should not be allowed to csr with different common names than order") - - // Validate we reject CSRs that contain DNS names that aren't in the original order - badCr = &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: createOrder.Identifiers[0].Value}, - DNSNames: []string{"www.notinorder.com"}, - } - - csrWithBadName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey) - require.NoError(t, err, "failed generating csr with bad name") - - _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true) - require.Error(t, err, "should not be allowed to csr with different names than order") - - // Validate we reject CSRs that contain IP addreses that weren't in the original order - badCr = &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: createOrder.Identifiers[0].Value}, - IPAddresses: []net.IP{{127, 0, 0, 1}}, - } - - csrWithBadIP, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey) - 
require.NoError(t, err, "failed generating csr with bad name") - - _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadIP, true) - require.Error(t, err, "should not be allowed to csr with different ip address than order") - - // Validate we reject CSRs that contains fewer names than in the original order. - badCr = &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: identifiers[0]}, - } - - csrWithBadName, err = x509.CreateCertificateRequest(rand.Reader, badCr, csrKey) - require.NoError(t, err, "failed generating csr with bad name") - - _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true) - require.Error(t, err, "should not be allowed to csr with different names than order") - - // Finally test a proper CSR, with the correct name and signed with a different key works. - csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) - require.NoError(t, err, "failed generating csr") - - certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true) - require.NoError(t, err, "failed finalizing order") - require.Len(t, certs, 3, "expected three items within the returned certs") - - testAcmeCertSignedByCa(t, client, certs, "int-ca") - - // Make sure the certificate has a NotAfter date of a maximum of 90 days - acmeCert, err := x509.ParseCertificate(certs[0]) - require.NoError(t, err, "failed parsing acme cert bytes") - maxAcmeNotAfter := time.Now().Add(maxAcmeCertTTL) - if maxAcmeNotAfter.Before(acmeCert.NotAfter) { - require.Fail(t, fmt.Sprintf("certificate has a NotAfter value %v greater than ACME max ttl %v", acmeCert.NotAfter, maxAcmeNotAfter)) - } - // Deactivate account - t.Logf("Testing deactivate account on %s", baseAcmeURL) - err = acmeClient.DeactivateReg(testCtx) - require.NoError(t, err, "failed deactivating account") - - // Make sure we get an unauthorized error trying to update the account again. 
- t.Logf("Testing update on deactivated account fails on %s", baseAcmeURL) - _, err = acmeClient.UpdateReg(testCtx, acct) - require.Error(t, err, "expected account to be deactivated") - require.IsType(t, &acme.Error{}, err, "expected acme error type") - acmeErr := err.(*acme.Error) - require.Equal(t, "urn:ietf:params:acme:error:unauthorized", acmeErr.ProblemType) - }) - } -} - -// TestAcmeBasicWorkflowWithEab verify that new accounts require EAB's if enforced by configuration. -func TestAcmeBasicWorkflowWithEab(t *testing.T) { - t.Parallel() - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - testCtx := context.Background() - - // Enable EAB - _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ - "enabled": true, - "eab_policy": "always-required", - }) - require.NoError(t, err) - - cases := []struct { - name string - prefixUrl string - }{ - {"root", "acme/"}, - {"role", "roles/test-role/acme/"}, - {"issuer", "issuer/int-ca/acme/"}, - {"issuer_role", "issuer/int-ca/roles/test-role/acme/"}, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - baseAcmeURL := "/v1/pki/" + tc.prefixUrl - accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed creating ec key") - - acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) - - t.Logf("Testing discover on %s", baseAcmeURL) - discovery, err := acmeClient.Discover(testCtx) - require.NoError(t, err, "failed acme discovery call") - require.True(t, discovery.ExternalAccountRequired, "bad value for external account required in directory") - - // Create new account without EAB, should fail - t.Logf("Testing register on %s", baseAcmeURL) - _, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) - require.ErrorContains(t, err, "urn:ietf:params:acme:error:externalAccountRequired", - "expected failure creating an account without eab") 
- - // Test fetch, list, delete workflow - kid, _ := getEABKey(t, client, tc.prefixUrl) - resp, err := client.Logical().ListWithContext(testCtx, "pki/eab") - require.NoError(t, err, "failed to list eab tokens") - require.NotNil(t, resp, "list response for eab tokens should not be nil") - require.Contains(t, resp.Data, "keys") - require.Contains(t, resp.Data, "key_info") - require.Len(t, resp.Data["keys"], 1) - require.Contains(t, resp.Data["keys"], kid) - - _, err = client.Logical().DeleteWithContext(testCtx, "pki/eab/"+kid) - require.NoError(t, err, "failed to delete eab") - - // List eabs should return zero results - resp, err = client.Logical().ListWithContext(testCtx, "pki/eab") - require.NoError(t, err, "failed to list eab tokens") - require.Nil(t, resp, "list response for eab tokens should have been nil") - - // fetch a new EAB - kid, eabKeyBytes := getEABKey(t, client, tc.prefixUrl) - acct := &acme.Account{ - ExternalAccountBinding: &acme.ExternalAccountBinding{ - KID: kid, - Key: eabKeyBytes, - }, - } - - // Make sure we can list our key - resp, err = client.Logical().ListWithContext(testCtx, "pki/eab") - require.NoError(t, err, "failed to list eab tokens") - require.NotNil(t, resp, "list response for eab tokens should not be nil") - require.Contains(t, resp.Data, "keys") - require.Contains(t, resp.Data, "key_info") - require.Len(t, resp.Data["keys"], 1) - require.Contains(t, resp.Data["keys"], kid) - - keyInfo := resp.Data["key_info"].(map[string]interface{}) - require.Contains(t, keyInfo, kid) - - infoForKid := keyInfo[kid].(map[string]interface{}) - require.Equal(t, "hs", infoForKid["key_type"]) - require.Equal(t, tc.prefixUrl+"directory", infoForKid["acme_directory"]) - - // Create new account with EAB - t.Logf("Testing register on %s", baseAcmeURL) - _, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true }) - require.NoError(t, err, "failed registering new account with eab") - - // Make sure our EAB is no longer available - 
resp, err = client.Logical().ListWithContext(context.Background(), "pki/eab") - require.NoError(t, err, "failed to list eab tokens") - require.Nil(t, resp, "list response for eab tokens should have been nil due to empty list") - - // Attempt to create another account with the same EAB as before -- should fail - accountKey2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed creating ec key") - - acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2) - acct2 := &acme.Account{ - ExternalAccountBinding: &acme.ExternalAccountBinding{ - KID: kid, - Key: eabKeyBytes, - }, - } - - _, err = acmeClient2.Register(testCtx, acct2, func(tosURL string) bool { return true }) - require.ErrorContains(t, err, "urn:ietf:params:acme:error:unauthorized", "should fail due to EAB re-use") - - // We can lookup/find an existing account without EAB if we have the account key - _, err = acmeClient.GetReg(testCtx /* unused url */, "") - require.NoError(t, err, "expected to lookup existing account without eab") - }) - } -} - -// TestAcmeNonce a basic test that will validate we get back a nonce with the proper status codes -// based on the -func TestAcmeNonce(t *testing.T) { - t.Parallel() - cluster, client, pathConfig := setupAcmeBackend(t) - defer cluster.Cleanup() - - cases := []struct { - name string - prefixUrl string - directoryUrl string - }{ - {"root", "", "pki/acme/new-nonce"}, - {"role", "/roles/test-role", "pki/roles/test-role/acme/new-nonce"}, - {"issuer", "/issuer/default", "pki/issuer/default/acme/new-nonce"}, - {"issuer_role", "/issuer/default/roles/test-role", "pki/issuer/default/roles/test-role/acme/new-nonce"}, - } - - for _, tc := range cases { - for _, httpOp := range []string{"get", "header"} { - t.Run(fmt.Sprintf("%s-%s", tc.name, httpOp), func(t *testing.T) { - var req *api.Request - switch httpOp { - case "get": - req = client.NewRequest(http.MethodGet, "/v1/"+tc.directoryUrl) - case "header": - req = 
client.NewRequest(http.MethodHead, "/v1/"+tc.directoryUrl) - } - res, err := client.RawRequestWithContext(ctx, req) - require.NoError(t, err, "failed sending raw request") - _ = res.Body.Close() - - // Proper Status Code - switch httpOp { - case "get": - require.Equal(t, http.StatusNoContent, res.StatusCode) - case "header": - require.Equal(t, http.StatusOK, res.StatusCode) - } - - // Make sure we don't have a Content-Type header. - require.Equal(t, "", res.Header.Get("Content-Type")) - - // Make sure we return the Cache-Control header - require.Contains(t, res.Header.Get("Cache-Control"), "no-store", - "missing Cache-Control header with no-store header value") - - // Test for our nonce header value - require.NotEmpty(t, res.Header.Get("Replay-Nonce"), "missing Replay-Nonce header with an actual value") - - // Test Link header value - expectedLinkHeader := fmt.Sprintf("<%s>;rel=\"index\"", pathConfig+tc.prefixUrl+"/acme/directory") - require.Contains(t, res.Header.Get("Link"), expectedLinkHeader, - "different value for link header than expected") - }) - } - } -} - -// TestAcmeClusterPathNotConfigured basic testing of the ACME error handler. -func TestAcmeClusterPathNotConfigured(t *testing.T) { - t.Parallel() - cluster, client := setupTestPkiCluster(t) - defer cluster.Cleanup() - - // Go sneaky, sneaky and update the acme configuration through sys/raw to bypass config/cluster path checks - pkiMount := findStorageMountUuid(t, client, "pki") - rawPath := path.Join("/sys/raw/logical/", pkiMount, storageAcmeConfig) - _, err := client.Logical().WriteWithContext(context.Background(), rawPath, map[string]interface{}{ - "value": "{\"enabled\": true, \"eab_policy_name\": \"not-required\"}", - }) - require.NoError(t, err, "failed updating acme config through sys/raw") - - // Force reload the plugin so we read the new config we slipped in. 
- _, err = client.Sys().ReloadPluginWithContext(context.Background(), &api.ReloadPluginInput{Mounts: []string{"pki"}}) - require.NoError(t, err, "failed reloading plugin") - - // Do not fill in the path option within the local cluster configuration - cases := []struct { - name string - directoryUrl string - }{ - {"root", "pki/acme/directory"}, - {"role", "pki/roles/test-role/acme/directory"}, - {"issuer", "pki/issuer/default/acme/directory"}, - {"issuer_role", "pki/issuer/default/roles/test-role/acme/directory"}, - } - testCtx := context.Background() - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - dirResp, err := client.Logical().ReadRawWithContext(testCtx, tc.directoryUrl) - require.Error(t, err, "expected failure reading ACME directory configuration got none") - - require.Equal(t, "application/problem+json", dirResp.Header.Get("Content-Type")) - require.Equal(t, http.StatusInternalServerError, dirResp.StatusCode) - - rawBodyBytes, err := io.ReadAll(dirResp.Body) - require.NoError(t, err, "failed reading from directory response body") - _ = dirResp.Body.Close() - - respType := map[string]interface{}{} - err = json.Unmarshal(rawBodyBytes, &respType) - require.NoError(t, err, "failed unmarshalling ACME directory response body") - - require.Equal(t, "urn:ietf:params:acme:error:serverInternal", respType["type"]) - require.NotEmpty(t, respType["detail"]) - }) - } -} - -// TestAcmeAccountsCrossingDirectoryPath make sure that if an account attempts to use a different ACME -// directory path that we get an error. 
-func TestAcmeAccountsCrossingDirectoryPath(t *testing.T) { - t.Parallel() - cluster, _, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - baseAcmeURL := "/v1/pki/acme/" - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa key") - - testCtx := context.Background() - acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) - - // Create new account - acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) - require.NoError(t, err, "failed registering account") - - // Try to update the account under another ACME directory - baseAcmeURL2 := "/v1/pki/roles/test-role/acme/" - acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL2, accountKey) - acct.Contact = []string{"mailto:test3@example.com"} - _, err = acmeClient2.UpdateReg(testCtx, acct) - require.Error(t, err, "successfully updated account when we should have failed due to different directory") - // We don't test for the specific error about using the wrong directory, as the golang library - // swallows the error we are sending back to a no account error -} - -// TestAcmeEabCrossingDirectoryPath make sure that if an account attempts to use a different ACME -// directory path that an EAB was created within we get an error. 
-func TestAcmeEabCrossingDirectoryPath(t *testing.T) { - t.Parallel() - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - // Enable EAB - _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ - "enabled": true, - "eab_policy": "always-required", - }) - require.NoError(t, err) - - baseAcmeURL := "/v1/pki/acme/" - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa key") - - testCtx := context.Background() - acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) - - // fetch a new EAB - kid, eabKeyBytes := getEABKey(t, client, "roles/test-role/acme/") - acct := &acme.Account{ - ExternalAccountBinding: &acme.ExternalAccountBinding{ - KID: kid, - Key: eabKeyBytes, - }, - } - - // Create new account - _, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true }) - require.ErrorContains(t, err, "failed to verify eab", "should have failed as EAB is for a different directory") -} - -// TestAcmeDisabledWithEnvVar verifies if VAULT_DISABLE_PUBLIC_ACME is set that we completely -// disable the ACME service -func TestAcmeDisabledWithEnvVar(t *testing.T) { - // Setup a cluster with the configuration set to not-required, initially as the - // configuration will validate if the environment var is set - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - // Seal setup the environment variable, and unseal which now means we have a cluster - // with ACME configuration saying it is enabled with a bad EAB policy. - cluster.EnsureCoresSealed(t) - t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true") - cluster.UnsealCores(t) - - // Make sure that ACME is disabled now. 
- for _, method := range []string{http.MethodHead, http.MethodGet} { - t.Run(fmt.Sprintf("%s", method), func(t *testing.T) { - req := client.NewRequest(method, "/v1/pki/acme/new-nonce") - _, err := client.RawRequestWithContext(ctx, req) - require.Error(t, err, "should have received an error as ACME should have been disabled") - - if apiError, ok := err.(*api.ResponseError); ok { - require.Equal(t, 404, apiError.StatusCode) - } - }) - } -} - -// TestAcmeConfigChecksPublicAcmeEnv verifies certain EAB policy values can not be set if ENV var is enabled -func TestAcmeConfigChecksPublicAcmeEnv(t *testing.T) { - t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true") - cluster, client := setupTestPkiCluster(t) - defer cluster.Cleanup() - - _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/cluster", map[string]interface{}{ - "path": "https://dadgarcorp.com/v1/pki", - }) - require.NoError(t, err) - - _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ - "enabled": true, - "eab_policy": string(eabPolicyAlwaysRequired), - }) - require.NoError(t, err) - - for _, policyName := range []EabPolicyName{eabPolicyNewAccountRequired, eabPolicyNotRequired} { - _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ - "enabled": true, - "eab_policy": string(policyName), - }) - require.Error(t, err, "eab policy %s should have not been allowed to be set") - } - - // Make sure we can disable ACME and the eab policy is not checked - _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ - "enabled": false, - "eab_policy": string(eabPolicyNotRequired), - }) - require.NoError(t, err) -} - -// TestAcmeTruncatesToIssuerExpiry make sure that if the selected issuer's expiry is shorter than the -// CSR's selected TTL value in ACME and the issuer's leaf_not_after_behavior setting is set to Err, -// we will override the 
configured behavior and truncate to the issuer's NotAfter -func TestAcmeTruncatesToIssuerExpiry(t *testing.T) { - t.Parallel() - - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - testCtx := context.Background() - mount := "pki" - resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal", - map[string]interface{}{ - "key_name": "short-key", - "key_type": "ec", - "common_name": "test.com", - }) - require.NoError(t, err, "failed creating intermediary CSR") - intermediateCSR := resp.Data["csr"].(string) - - // Sign the intermediate CSR using /pki - resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{ - "csr": intermediateCSR, - "ttl": "10m", - "max_ttl": "1h", - }) - require.NoError(t, err, "failed signing intermediary CSR") - intermediateCertPEM := resp.Data["certificate"].(string) - - shortCa := parseCert(t, intermediateCertPEM) - - // Configure the intermediate cert as the CA in /pki2 - resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{ - "pem_bundle": intermediateCertPEM, - }) - require.NoError(t, err, "failed importing intermediary cert") - importedIssuersRaw := resp.Data["imported_issuers"].([]interface{}) - require.Len(t, importedIssuersRaw, 1) - shortCaUuid := importedIssuersRaw[0].(string) - - _, err = client.Logical().Write(mount+"/issuer/"+shortCaUuid, map[string]interface{}{ - "leaf_not_after_behavior": "err", - "issuer_name": "short-ca", - }) - require.NoError(t, err, "failed updating issuer name") - - baseAcmeURL := "/v1/pki/issuer/short-ca/acme/" - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa key") - - acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) - - // Create new account - t.Logf("Testing register on %s", baseAcmeURL) - acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool 
{ return true }) - require.NoError(t, err, "failed registering account") - - // Create an order - t.Logf("Testing Authorize Order on %s", baseAcmeURL) - identifiers := []string{"*.localdomain"} - order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ - {Type: "dns", Value: identifiers[0]}, - }) - require.NoError(t, err, "failed creating order") - - // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow - // test. - markAuthorizationSuccess(t, client, acmeClient, acct, order) - - // Build a proper CSR, with the correct name and signed with a different key works. - goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}} - csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generated key for CSR") - csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) - require.NoError(t, err, "failed generating csr") - - certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) - require.NoError(t, err, "failed finalizing order") - require.Len(t, certs, 3, "expected full acme chain") - - testAcmeCertSignedByCa(t, client, certs, "short-ca") - - acmeCert, err := x509.ParseCertificate(certs[0]) - require.NoError(t, err, "failed parsing acme cert") - - require.Equal(t, shortCa.NotAfter, acmeCert.NotAfter, "certificate times aren't the same") -} - -// TestAcmeIgnoresRoleExtKeyUsage -func TestAcmeIgnoresRoleExtKeyUsage(t *testing.T) { - t.Parallel() - - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - testCtx := context.Background() - - roleName := "test-role" - - roleOpt := map[string]interface{}{ - "ttl_duration": "365h", - "max_ttl_duration": "720h", - "key_type": "any", - "allowed_domains": "localdomain", - "allow_subdomains": "true", - "allow_wildcard_certificates": "true", - "require_cn": "true", /* explicit default */ - "server_flag": "true", - "client_flag": "true", - "code_signing_flag": "true", - 
"email_protection_flag": "true", - } - - _, err := client.Logical().Write("pki/roles/"+roleName, roleOpt) - - baseAcmeURL := "/v1/pki/roles/" + roleName + "/acme/" - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa key") - - require.NoError(t, err, "failed creating role test-role") - - acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) - - // Create new account - t.Logf("Testing register on %s", baseAcmeURL) - acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) - require.NoError(t, err, "failed registering account") - - // Create an order - t.Logf("Testing Authorize Order on %s", baseAcmeURL) - identifiers := []string{"*.localdomain"} - order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ - {Type: "dns", Value: identifiers[0]}, - }) - require.NoError(t, err, "failed creating order") - - // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test. - markAuthorizationSuccess(t, client, acmeClient, acct, order) - - // Build a proper CSR, with the correct name and signed with a different key works. 
- goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}} - csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generated key for CSR") - csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) - require.NoError(t, err, "failed generating csr") - - certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) - require.NoError(t, err, "order finalization failed") - require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle") - acmeCert, err := x509.ParseCertificate(certs[0]) - require.NoError(t, err, "failed parsing acme cert") - - require.Equal(t, 1, len(acmeCert.ExtKeyUsage), "mis-match on expected ExtKeyUsages") - require.ElementsMatch(t, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, acmeCert.ExtKeyUsage, - "mismatch of ExtKeyUsage flags") -} - -func TestIssuerRoleDirectoryAssociations(t *testing.T) { - t.Parallel() - - // This creates two issuers for us (root-ca, int-ca) and two - // roles (test-role, acme) that we can use with various directory - // configurations. - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - // Setup DNS for validations. - testCtx := context.Background() - dns := dnstest.SetupResolver(t, "dadgarcorp.com") - defer dns.Cleanup() - _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ - "dns_resolver": dns.GetLocalAddr(), - }) - require.NoError(t, err, "failed to specify dns resolver") - - // 1. Use a forbidden role should fail. 
- resp, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ - "enabled": true, - "allowed_roles": []string{"acme"}, - }) - require.NoError(t, err, "failed to write config") - require.NotNil(t, resp) - - _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory") - require.Error(t, err, "failed to forbid usage of test-role") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory") - require.Error(t, err, "failed to forbid usage of test-role under default issuer") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory") - require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory") - require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer") - - _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory") - require.NoError(t, err, "failed to allow usage of acme") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory") - require.NoError(t, err, "failed to allow usage of acme under default issuer") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory") - require.NoError(t, err, "failed to allow usage of acme under int-ca issuer") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory") - require.NoError(t, err, "failed to allow usage of acme under root-ca issuer") - - // 2. Use a forbidden issuer should fail. 
- resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ - "allowed_roles": []string{"acme"}, - "allowed_issuers": []string{"int-ca"}, - }) - require.NoError(t, err, "failed to write config") - require.NotNil(t, resp) - - _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory") - require.Error(t, err, "failed to forbid usage of test-role") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory") - require.Error(t, err, "failed to forbid usage of test-role under default issuer") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory") - require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory") - require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer") - - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory") - require.Error(t, err, "failed to forbid usage of acme under root-ca issuer") - - _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory") - require.NoError(t, err, "failed to allow usage of acme") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory") - require.NoError(t, err, "failed to allow usage of acme under default issuer") - _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory") - require.NoError(t, err, "failed to allow usage of acme under int-ca issuer") - - // 3. Setting the default directory to be a sign-verbatim policy and - // using two different CAs should result in certs signed by each CA. 
- resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ - "allowed_roles": []string{"*"}, - "allowed_issuers": []string{"*"}, - "default_directory_policy": "sign-verbatim", - }) - require.NoError(t, err, "failed to write config") - require.NotNil(t, resp) - - // default == int-ca - acmeClientDefault := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/default/acme/", nil) - defaultLeafCert := doACMEForDomainWithDNS(t, dns, acmeClientDefault, []string{"default-ca.dadgarcorp.com"}) - requireSignedByAtPath(t, client, defaultLeafCert, "pki/issuer/int-ca") - - acmeClientIntCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/int-ca/acme/", nil) - intCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientIntCA, []string{"int-ca.dadgarcorp.com"}) - requireSignedByAtPath(t, client, intCALeafCert, "pki/issuer/int-ca") - - acmeClientRootCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/root-ca/acme/", nil) - rootCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientRootCA, []string{"root-ca.dadgarcorp.com"}) - requireSignedByAtPath(t, client, rootCALeafCert, "pki/issuer/root-ca") - - // 4. Using a role-based default directory should allow us to control leaf - // issuance on the base and issuer-specific directories. - resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ - "allowed_roles": []string{"*"}, - "allowed_issuers": []string{"*"}, - "default_directory_policy": "role:acme", - }) - require.NoError(t, err, "failed to write config") - require.NotNil(t, resp) - - resp, err = client.Logical().JSONMergePatch(testCtx, "pki/roles/acme", map[string]interface{}{ - "ou": "IT Security", - "organization": []string{"Dadgar Corporation, Limited"}, - "allow_any_name": true, - }) - require.NoError(t, err, "failed to write role differentiator") - require.NotNil(t, resp) - - for _, issuer := range []string{"", "default", "int-ca", "root-ca"} { - // Path should override role. 
- directory := "/v1/pki/issuer/" + issuer + "/acme/" - issuerPath := "/pki/issuer/" + issuer - if issuer == "" { - directory = "/v1/pki/acme/" - issuerPath = "/pki/issuer/int-ca" - } else if issuer == "default" { - issuerPath = "/pki/issuer/int-ca" - } - - t.Logf("using directory: %v / issuer: %v", directory, issuerPath) - - acmeClient := getAcmeClientForCluster(t, cluster, directory, nil) - leafCert := doACMEForDomainWithDNS(t, dns, acmeClient, []string{"role-restricted.dadgarcorp.com"}) - require.Contains(t, leafCert.Subject.Organization, "Dadgar Corporation, Limited", "on directory: %v", directory) - require.Contains(t, leafCert.Subject.OrganizationalUnit, "IT Security", "on directory: %v", directory) - requireSignedByAtPath(t, client, leafCert, issuerPath) - } - - // 5. -} - -// TestAcmeWithCsrIncludingBasicConstraintExtension verify that we error out for a CSR that is requesting a -// certificate with the IsCA set to true, false is okay, within the basic constraints extension and that no matter what -// the extension is not present on the returned certificate. 
-func TestAcmeWithCsrIncludingBasicConstraintExtension(t *testing.T) { - t.Parallel() - - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - testCtx := context.Background() - - baseAcmeURL := "/v1/pki/acme/" - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa key") - - acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) - - // Create new account - t.Logf("Testing register on %s", baseAcmeURL) - acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) - require.NoError(t, err, "failed registering account") - - // Create an order - t.Logf("Testing Authorize Order on %s", baseAcmeURL) - identifiers := []string{"*.localdomain"} - order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ - {Type: "dns", Value: identifiers[0]}, - }) - require.NoError(t, err, "failed creating order") - - // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test. 
- markAuthorizationSuccess(t, client, acmeClient, acct, order) - - // Build a CSR with IsCA set to true, making sure we reject it - extension, err := certutil.CreateBasicConstraintExtension(true, -1) - require.NoError(t, err, "failed generating basic constraint extension") - - isCATrueCSR := &x509.CertificateRequest{ - DNSNames: []string{identifiers[0]}, - ExtraExtensions: []pkix.Extension{extension}, - } - csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generated key for CSR") - csr, err := x509.CreateCertificateRequest(rand.Reader, isCATrueCSR, csrKey) - require.NoError(t, err, "failed generating csr") - - _, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) - require.Error(t, err, "order finalization should have failed with IsCA set to true") - - extension, err = certutil.CreateBasicConstraintExtension(false, -1) - require.NoError(t, err, "failed generating basic constraint extension") - isCAFalseCSR := &x509.CertificateRequest{ - DNSNames: []string{identifiers[0]}, - Extensions: []pkix.Extension{extension}, - } - - csr, err = x509.CreateCertificateRequest(rand.Reader, isCAFalseCSR, csrKey) - require.NoError(t, err, "failed generating csr") - - certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) - require.NoError(t, err, "order finalization should have failed with IsCA set to false") - - require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle") - acmeCert, err := x509.ParseCertificate(certs[0]) - require.NoError(t, err, "failed parsing acme cert") - - // Make sure we don't have any basic constraint extension within the returned cert - for _, ext := range acmeCert.Extensions { - if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) { - // We shouldn't have this extension in our cert - t.Fatalf("acme csr contained a basic constraints extension") - } - } -} - -func markAuthorizationSuccess(t *testing.T, client *api.Client, acmeClient 
*acme.Client, acct *acme.Account, - order *acme.Order, -) { - testCtx := context.Background() - - pkiMount := findStorageMountUuid(t, client, "pki") - - // Delete any and all challenge validation entries to stop the engine from overwriting our hack here - i := 0 - for { - deleteCvEntries(t, client, pkiMount) - - accountId := acct.URI[strings.LastIndex(acct.URI, "/"):] - for _, authURI := range order.AuthzURLs { - authId := authURI[strings.LastIndex(authURI, "/"):] - - rawPath := path.Join("/sys/raw/logical/", pkiMount, getAuthorizationPath(accountId, authId)) - resp, err := client.Logical().ReadWithContext(testCtx, rawPath) - require.NoError(t, err, "failed looking up authorization storage") - require.NotNil(t, resp, "sys raw response was nil") - require.NotEmpty(t, resp.Data["value"], "no value field in sys raw response") - - var authz ACMEAuthorization - err = jsonutil.DecodeJSON([]byte(resp.Data["value"].(string)), &authz) - require.NoError(t, err, "error decoding authorization: %w", err) - authz.Status = ACMEAuthorizationValid - for _, challenge := range authz.Challenges { - challenge.Status = ACMEChallengeValid - } - - encodeJSON, err := jsonutil.EncodeJSON(authz) - require.NoError(t, err, "failed encoding authz json") - _, err = client.Logical().WriteWithContext(testCtx, rawPath, map[string]interface{}{ - "value": base64.StdEncoding.EncodeToString(encodeJSON), - "encoding": "base64", - }) - require.NoError(t, err, "failed writing authorization storage") - } - - // Give some time - time.Sleep(200 * time.Millisecond) - - // Check to see if we have fixed up the status and no new entries have appeared. 
- if !deleteCvEntries(t, client, pkiMount) { - // No entries found - // Look to see if we raced against the engine - orderLookup, err := acmeClient.GetOrder(testCtx, order.URI) - require.NoError(t, err, "failed loading order status after manually ") - - if orderLookup.Status == string(ACMEOrderReady) { - // Our order seems to be in the proper status, should be safe-ish to go ahead now - break - } else { - t.Logf("order status was not ready, retrying") - } - } else { - t.Logf("new challenge entries appeared after deletion, retrying") - } - - if i > 5 { - t.Fatalf("We are constantly deleting cv entries or order status is not changing, something is wrong") - } - - i++ - } -} - -func deleteCvEntries(t *testing.T, client *api.Client, pkiMount string) bool { - testCtx := context.Background() - - cvPath := path.Join("/sys/raw/logical/", pkiMount, acmeValidationPrefix) - resp, err := client.Logical().ListWithContext(testCtx, cvPath) - require.NoError(t, err, "failed listing cv path items") - - deletedEntries := false - if resp != nil { - cvEntries := resp.Data["keys"].([]interface{}) - for _, cvEntry := range cvEntries { - cvEntryPath := path.Join(cvPath, cvEntry.(string)) - _, err = client.Logical().DeleteWithContext(testCtx, cvEntryPath) - require.NoError(t, err, "failed to delete cv entry") - deletedEntries = true - } - } - - return deletedEntries -} - -func setupAcmeBackend(t *testing.T) (*vault.TestCluster, *api.Client, string) { - cluster, client := setupTestPkiCluster(t) - - return setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki") -} - -func setupAcmeBackendOnClusterAtPath(t *testing.T, cluster *vault.TestCluster, client *api.Client, mount string) (*vault.TestCluster, *api.Client, string) { - mount = strings.Trim(mount, "/") - - // Setting templated AIAs should succeed. 
- pathConfig := client.Address() + "/v1/" + mount - - namespace := "" - mountName := mount - if mount != "pki" { - if strings.Contains(mount, "/") && constants.IsEnterprise { - ns_pieces := strings.Split(mount, "/") - c := len(ns_pieces) - // mount is c-1 - ns_name := ns_pieces[c-2] - if len(ns_pieces) > 2 { - // Parent's namespaces - parent := strings.Join(ns_pieces[0:c-2], "/") - _, err := client.WithNamespace(parent).Logical().Write("/sys/namespaces/"+ns_name, nil) - require.NoError(t, err, "failed to create nested namespaces "+parent+" -> "+ns_name) - } else { - _, err := client.Logical().Write("/sys/namespaces/"+ns_name, nil) - require.NoError(t, err, "failed to create nested namespace "+ns_name) - } - namespace = strings.Join(ns_pieces[0:c-1], "/") - mountName = ns_pieces[c-1] - } - - err := client.WithNamespace(namespace).Sys().Mount(mountName, &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "3000h", - MaxLeaseTTL: "600000h", - }, - }) - require.NoError(t, err, "failed to mount new PKI instance at "+mount) - } - - err := client.Sys().TuneMountWithContext(ctx, mountName, api.MountConfigInput{ - DefaultLeaseTTL: "3000h", - MaxLeaseTTL: "600000h", - }) - require.NoError(t, err, "failed updating mount lease times "+mount) - - _, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/cluster", map[string]interface{}{ - "path": pathConfig, - "aia_path": "http://localhost:8200/cdn/" + mount, - }) - require.NoError(t, err) - - _, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/acme", map[string]interface{}{ - "enabled": true, - "eab_policy": "not-required", - }) - require.NoError(t, err) - - // Allow certain headers to pass through for ACME support - _, err = client.WithNamespace(namespace).Logical().WriteWithContext(context.Background(), "sys/mounts/"+mountName+"/tune", map[string]interface{}{ - "allowed_response_headers": []string{"Last-Modified", "Replay-Nonce", "Link", 
"Location"}, - "max_lease_ttl": "920000h", - }) - require.NoError(t, err, "failed tuning mount response headers") - - resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/root/internal", - map[string]interface{}{ - "issuer_name": "root-ca", - "key_name": "root-key", - "key_type": "ec", - "common_name": "root.com", - "ttl": "7200h", - "max_ttl": "920000h", - }) - require.NoError(t, err, "failed creating root CA") - - resp, err = client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal", - map[string]interface{}{ - "key_name": "int-key", - "key_type": "ec", - "common_name": "test.com", - }) - require.NoError(t, err, "failed creating intermediary CSR") - intermediateCSR := resp.Data["csr"].(string) - - // Sign the intermediate CSR using /pki - resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{ - "csr": intermediateCSR, - "ttl": "7100h", - "max_ttl": "910000h", - }) - require.NoError(t, err, "failed signing intermediary CSR") - intermediateCertPEM := resp.Data["certificate"].(string) - - // Configure the intermediate cert as the CA in /pki2 - resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{ - "pem_bundle": intermediateCertPEM, - }) - require.NoError(t, err, "failed importing intermediary cert") - importedIssuersRaw := resp.Data["imported_issuers"].([]interface{}) - require.Len(t, importedIssuersRaw, 1) - intCaUuid := importedIssuersRaw[0].(string) - - _, err = client.Logical().Write(mount+"/issuer/"+intCaUuid, map[string]interface{}{ - "issuer_name": "int-ca", - }) - require.NoError(t, err, "failed updating issuer name") - - _, err = client.Logical().Write(mount+"/config/issuers", map[string]interface{}{ - "default": "int-ca", - }) - require.NoError(t, err, "failed updating default issuer") - - _, err = client.Logical().Write(mount+"/roles/test-role", map[string]interface{}{ - "ttl_duration": 
"168h", - "max_ttl_duration": "168h", - "key_type": "any", - "allowed_domains": "localdomain", - "allow_subdomains": "true", - "allow_wildcard_certificates": "true", - }) - require.NoError(t, err, "failed creating role test-role") - - _, err = client.Logical().Write(mount+"/roles/acme", map[string]interface{}{ - "ttl_duration": "3650h", - "max_ttl_duration": "7200h", - "key_type": "any", - }) - require.NoError(t, err, "failed creating role acme") - - return cluster, client, pathConfig -} - -func testAcmeCertSignedByCa(t *testing.T, client *api.Client, derCerts [][]byte, issuerRef string) { - t.Helper() - require.NotEmpty(t, derCerts) - acmeCert, err := x509.ParseCertificate(derCerts[0]) - require.NoError(t, err, "failed parsing acme cert bytes") - - resp, err := client.Logical().ReadWithContext(context.Background(), "pki/issuer/"+issuerRef) - require.NoError(t, err, "failed reading issuer with name %s", issuerRef) - issuerCert := parseCert(t, resp.Data["certificate"].(string)) - issuerChainRaw := resp.Data["ca_chain"].([]interface{}) - - err = acmeCert.CheckSignatureFrom(issuerCert) - require.NoError(t, err, "issuer %s did not sign provided cert", issuerRef) - - expectedCerts := [][]byte{derCerts[0]} - - for _, entry := range issuerChainRaw { - chainCert := parseCert(t, entry.(string)) - expectedCerts = append(expectedCerts, chainCert.Raw) - } - - if diffs := deep.Equal(expectedCerts, derCerts); diffs != nil { - t.Fatalf("diffs were found between the acme chain returned and the expected value: \n%v", diffs) - } -} - -// TestAcmeValidationError make sure that we properly return errors on validation errors. 
-func TestAcmeValidationError(t *testing.T) { - t.Parallel() - cluster, _, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - testCtx := context.Background() - baseAcmeURL := "/v1/pki/acme/" - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa key") - - acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) - - // Create new account - t.Logf("Testing register on %s", baseAcmeURL) - _, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) - require.NoError(t, err, "failed registering account") - - // Create an order - t.Logf("Testing Authorize Order on %s", baseAcmeURL) - identifiers := []string{"www.dadgarcorp.com"} - order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ - {Type: "dns", Value: identifiers[0]}, - }) - require.NoError(t, err, "failed creating order") - - // Load authorizations - var authorizations []*acme.Authorization - for _, authUrl := range order.AuthzURLs { - auth, err := acmeClient.GetAuthorization(testCtx, authUrl) - require.NoError(t, err, "failed fetching authorization: %s", authUrl) - - authorizations = append(authorizations, auth) - } - require.Len(t, authorizations, 1, "expected a certain number of authorizations") - require.Len(t, authorizations[0].Challenges, 3, "expected a certain number of challenges associated with authorization") - - acceptedAuth, err := acmeClient.Accept(testCtx, authorizations[0].Challenges[0]) - require.NoError(t, err, "Should have been allowed to accept challenge 1") - require.Equal(t, string(ACMEChallengeProcessing), acceptedAuth.Status) - - _, err = acmeClient.Accept(testCtx, authorizations[0].Challenges[1]) - require.Error(t, err, "Should have been prevented to accept challenge 2") - - // Make sure our challenge returns errors - testhelpers.RetryUntil(t, 30*time.Second, func() error { - challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI) - if err != 
nil { - return err - } - - if challenge.Error == nil { - return fmt.Errorf("no error set in challenge yet") - } - - acmeError, ok := challenge.Error.(*acme.Error) - if !ok { - return fmt.Errorf("unexpected error back: %v", err) - } - - if acmeError.ProblemType != "urn:ietf:params:acme:error:incorrectResponse" { - return fmt.Errorf("unexpected ACME error back: %v", acmeError) - } - - return nil - }) - - // Make sure our challenge,auth and order status change. - // This takes a little too long to run in CI properly, we need the ability to influence - // how long the validations take before CI can go wild on this. - if os.Getenv("CI") == "" { - testhelpers.RetryUntil(t, 10*time.Minute, func() error { - challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI) - if err != nil { - return fmt.Errorf("failed to load challenge: %w", err) - } - - if challenge.Status != string(ACMEChallengeInvalid) { - return fmt.Errorf("challenge state was not changed to invalid: %v", challenge) - } - - authz, err := acmeClient.GetAuthorization(testCtx, authorizations[0].URI) - if err != nil { - return fmt.Errorf("failed to load authorization: %w", err) - } - - if authz.Status != string(ACMEAuthorizationInvalid) { - return fmt.Errorf("authz state was not changed to invalid: %v", authz) - } - - myOrder, err := acmeClient.GetOrder(testCtx, order.URI) - if err != nil { - return fmt.Errorf("failed to load order: %w", err) - } - - if myOrder.Status != string(ACMEOrderInvalid) { - return fmt.Errorf("order state was not changed to invalid: %v", order) - } - - return nil - }) - } -} - -func setupTestPkiCluster(t *testing.T) (*vault.TestCluster, *api.Client) { - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - EnableRaw: true, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - client := cluster.Cores[0].Client - 
mountPKIEndpoint(t, client, "pki") - return cluster, client -} - -func getAcmeClientForCluster(t *testing.T, cluster *vault.TestCluster, baseUrl string, key crypto.Signer) *acme.Client { - coreAddr := cluster.Cores[0].Listeners[0].Address - tlsConfig := cluster.Cores[0].TLSConfig() - - transport := cleanhttp.DefaultPooledTransport() - transport.TLSClientConfig = tlsConfig.Clone() - if err := http2.ConfigureTransport(transport); err != nil { - t.Fatal(err) - } - httpClient := &http.Client{Transport: transport} - if baseUrl[0] == '/' { - baseUrl = baseUrl[1:] - } - if !strings.HasPrefix(baseUrl, "v1/") { - baseUrl = "v1/" + baseUrl - } - baseAcmeURL := fmt.Sprintf("https://%s/%s", coreAddr.String(), baseUrl) - return &acme.Client{ - Key: key, - HTTPClient: httpClient, - DirectoryURL: baseAcmeURL + "directory", - } -} - -func getEABKey(t *testing.T, client *api.Client, baseUrl string) (string, []byte) { - resp, err := client.Logical().WriteWithContext(ctx, path.Join("pki/", baseUrl, "/new-eab"), map[string]interface{}{}) - require.NoError(t, err, "failed getting eab key") - require.NotNil(t, resp, "eab key returned nil response") - require.NotEmpty(t, resp.Data["id"], "eab key response missing id field") - kid := resp.Data["id"].(string) - - require.NotEmpty(t, resp.Data["key"], "eab key response missing private_key field") - base64Key := resp.Data["key"].(string) - require.True(t, strings.HasPrefix(base64Key, "vault-eab-0-"), "%s should have had a prefix of vault-eab-0-", base64Key) - privateKeyBytes, err := base64.RawURLEncoding.DecodeString(base64Key) - require.NoError(t, err, "failed base 64 decoding eab key response") - - require.Equal(t, "hs", resp.Data["key_type"], "eab key_type field mis-match") - require.Equal(t, baseUrl+"directory", resp.Data["acme_directory"], "eab acme_directory field mis-match") - require.NotEmpty(t, resp.Data["created_on"], "empty created_on field") - _, err = time.Parse(time.RFC3339, resp.Data["created_on"].(string)) - 
require.NoError(t, err, "failed parsing eab created_on field") - - return kid, privateKeyBytes -} - -func TestACMEClientRequestLimits(t *testing.T) { - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - cases := []struct { - name string - authorizations []acme.AuthzID - requestCSR x509.CertificateRequest - valid bool - }{ - { - "validate-only-cn", - []acme.AuthzID{ - {"dns", "localhost"}, - }, - x509.CertificateRequest{ - Subject: pkix.Name{CommonName: "localhost"}, - }, - true, - }, - { - "validate-only-san", - []acme.AuthzID{ - {"dns", "localhost"}, - }, - x509.CertificateRequest{ - DNSNames: []string{"localhost"}, - }, - true, - }, - { - "validate-only-ip-address", - []acme.AuthzID{ - {"ip", "127.0.0.1"}, - }, - x509.CertificateRequest{ - IPAddresses: []net.IP{{127, 0, 0, 1}}, - }, - true, - }, - } - - testCtx := context.Background() - acmeConfig := map[string]interface{}{ - "enabled": true, - "allowed_issuers": "*", - "allowed_roles": "*", - "default_directory_policy": "sign-verbatim", - "dns_resolver": "", - "eab_policy_name": "", - } - _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", acmeConfig) - require.NoError(t, err, "error configuring acme") - - for _, tc := range cases { - - // First Create Our Client - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa key") - acmeClient := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", accountKey) - - discovery, err := acmeClient.Discover(testCtx) - require.NoError(t, err, "failed acme discovery call") - t.Logf("%v", discovery) - - acct, err := acmeClient.Register(testCtx, &acme.Account{ - Contact: []string{"mailto:test@example.com"}, - }, func(tosURL string) bool { return true }) - require.NoError(t, err, "failed registering account") - require.Equal(t, acme.StatusValid, acct.Status) - require.Contains(t, acct.Contact, "mailto:test@example.com") - require.Len(t, acct.Contact, 1) - - // Create an order - t.Logf("Testing 
Authorize Order on %s", "pki/acme") - identifiers := make([]string, len(tc.authorizations)) - for index, auth := range tc.authorizations { - identifiers[index] = auth.Value - } - - createOrder, err := acmeClient.AuthorizeOrder(testCtx, tc.authorizations) - require.NoError(t, err, "failed creating order") - require.Equal(t, acme.StatusPending, createOrder.Status) - require.Empty(t, createOrder.CertURL) - require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL) - require.Len(t, createOrder.AuthzURLs, len(tc.authorizations), "expected same number of authzurls as identifiers") - - // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow - // test. - markAuthorizationSuccess(t, client, acmeClient, acct, createOrder) - - // Submit the CSR - csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, err, "failed generated key for CSR") - csr, err := x509.CreateCertificateRequest(rand.Reader, &tc.requestCSR, csrKey) - require.NoError(t, err, "failed generating csr") - - certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true) - - if tc.valid { - require.NoError(t, err, "failed finalizing order") - - // Validate we get a signed cert back - testAcmeCertSignedByCa(t, client, certs, "int-ca") - } else { - require.Error(t, err, "Not a valid CSR, should err") - } - } -} diff --git a/builtin/logical/pki/path_config_acme.go b/builtin/logical/pki/path_config_acme.go deleted file mode 100644 index 6b1b78bc37fe0..0000000000000 --- a/builtin/logical/pki/path_config_acme.go +++ /dev/null @@ -1,372 +0,0 @@ -package pki - -import ( - "context" - "fmt" - "net" - "os" - "strconv" - "strings" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/errutil" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - storageAcmeConfig = "config/acme" - pathConfigAcmeHelpSyn = "Configuration of ACME Endpoints" - pathConfigAcmeHelpDesc = "Here we 
configure:\n\nenabled=false, whether ACME is enabled, defaults to false meaning that clusters will by default not get ACME support,\nallowed_issuers=\"default\", which issuers are allowed for use with ACME; by default, this will only be the primary (default) issuer,\nallowed_roles=\"*\", which roles are allowed for use with ACME; by default these will be all roles matching our selection criteria,\ndefault_directory_policy=\"\", either \"forbid\", preventing the default directory from being used at all, \"role:\" which is the role to be used for non-role-qualified ACME requests; or \"sign-verbatim\", the default meaning ACME issuance will be equivalent to sign-verbatim.,\ndns_resolver=\"\", which specifies a custom DNS resolver to use for all ACME-related DNS lookups" - disableAcmeEnvVar = "VAULT_DISABLE_PUBLIC_ACME" -) - -type acmeConfigEntry struct { - Enabled bool `json:"enabled"` - AllowedIssuers []string `json:"allowed_issuers="` - AllowedRoles []string `json:"allowed_roles"` - DefaultDirectoryPolicy string `json:"default_directory_policy"` - DNSResolver string `json:"dns_resolver"` - EabPolicyName EabPolicyName `json:"eab_policy_name"` -} - -var defaultAcmeConfig = acmeConfigEntry{ - Enabled: false, - AllowedIssuers: []string{"*"}, - AllowedRoles: []string{"*"}, - DefaultDirectoryPolicy: "sign-verbatim", - DNSResolver: "", - EabPolicyName: eabPolicyNotRequired, -} - -func (sc *storageContext) getAcmeConfig() (*acmeConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, storageAcmeConfig) - if err != nil { - return nil, err - } - - var mapping acmeConfigEntry - if entry == nil { - mapping = defaultAcmeConfig - return &mapping, nil - } - - if err := entry.DecodeJSON(&mapping); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode ACME configuration: %v", err)} - } - - return &mapping, nil -} - -func (sc *storageContext) setAcmeConfig(entry *acmeConfigEntry) error { - json, err := logical.StorageEntryJSON(storageAcmeConfig, 
entry) - if err != nil { - return fmt.Errorf("failed creating storage entry: %w", err) - } - - if err := sc.Storage.Put(sc.Context, json); err != nil { - return fmt.Errorf("failed writing storage entry: %w", err) - } - - sc.Backend.acmeState.markConfigDirty() - return nil -} - -func pathAcmeConfig(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/acme", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, - - Fields: map[string]*framework.FieldSchema{ - "enabled": { - Type: framework.TypeBool, - Description: `whether ACME is enabled, defaults to false meaning that clusters will by default not get ACME support`, - Default: false, - }, - "allowed_issuers": { - Type: framework.TypeCommaStringSlice, - Description: `which issuers are allowed for use with ACME; by default, this will only be the primary (default) issuer`, - Default: []string{"*"}, - }, - "allowed_roles": { - Type: framework.TypeCommaStringSlice, - Description: `which roles are allowed for use with ACME; by default via '*', these will be all roles including sign-verbatim; when concrete role names are specified, any default_directory_policy role must be included to allow usage of the default acme directories under /pki/acme/directory and /pki/issuer/:issuer_id/acme/directory.`, - Default: []string{"*"}, - }, - "default_directory_policy": { - Type: framework.TypeString, - Description: `the policy to be used for non-role-qualified ACME requests; by default ACME issuance will be otherwise unrestricted, equivalent to the sign-verbatim endpoint; one may also specify a role to use as this policy, as "role:", the specified role must be allowed by allowed_roles`, - Default: "sign-verbatim", - }, - "dns_resolver": { - Type: framework.TypeString, - Description: `DNS resolver to use for domain resolution on this mount. Defaults to using the default system resolver. 
Must be in the format :, with both parts mandatory.`, - Default: "", - }, - "eab_policy": { - Type: framework.TypeString, - Description: `Specify the policy to use for external account binding behaviour, 'not-required', 'new-account-required' or 'always-required'`, - Default: "always-required", - }, - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "acme-configuration", - }, - Callback: b.pathAcmeRead, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathAcmeWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "acme", - }, - // Read more about why these flags are set in backend.go. - ForwardPerformanceStandby: true, - ForwardPerformanceSecondary: true, - }, - }, - - HelpSynopsis: pathConfigAcmeHelpSyn, - HelpDescription: pathConfigAcmeHelpDesc, - } -} - -func (b *backend) pathAcmeRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getAcmeConfig() - if err != nil { - return nil, err - } - - var warnings []string - if config.Enabled { - _, err := getBasePathFromClusterConfig(sc) - if err != nil { - warnings = append(warnings, err.Error()) - } - } - - return genResponseFromAcmeConfig(config, warnings), nil -} - -func genResponseFromAcmeConfig(config *acmeConfigEntry, warnings []string) *logical.Response { - response := &logical.Response{ - Data: map[string]interface{}{ - "allowed_roles": config.AllowedRoles, - "allowed_issuers": config.AllowedIssuers, - "default_directory_policy": config.DefaultDirectoryPolicy, - "enabled": config.Enabled, - "dns_resolver": config.DNSResolver, - "eab_policy": config.EabPolicyName, - }, - Warnings: warnings, - } - - // TODO: Add some nice warning if we are on a replication cluster and path isn't set - - return 
response -} - -func (b *backend) pathAcmeWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, req.Storage) - - config, err := sc.getAcmeConfig() - if err != nil { - return nil, err - } - - if enabledRaw, ok := d.GetOk("enabled"); ok { - config.Enabled = enabledRaw.(bool) - } - - if allowedRolesRaw, ok := d.GetOk("allowed_roles"); ok { - config.AllowedRoles = allowedRolesRaw.([]string) - if len(config.AllowedRoles) == 0 { - return nil, fmt.Errorf("allowed_roles must take a non-zero length value; specify '*' as the value to allow anything or specify enabled=false to disable ACME entirely") - } - } - - if defaultDirectoryPolicyRaw, ok := d.GetOk("default_directory_policy"); ok { - config.DefaultDirectoryPolicy = defaultDirectoryPolicyRaw.(string) - } - - if allowedIssuersRaw, ok := d.GetOk("allowed_issuers"); ok { - config.AllowedIssuers = allowedIssuersRaw.([]string) - if len(config.AllowedIssuers) == 0 { - return nil, fmt.Errorf("allowed_issuers must take a non-zero length value; specify '*' as the value to allow anything or specify enabled=false to disable ACME entirely") - } - } - - if dnsResolverRaw, ok := d.GetOk("dns_resolver"); ok { - config.DNSResolver = dnsResolverRaw.(string) - if config.DNSResolver != "" { - addr, _, err := net.SplitHostPort(config.DNSResolver) - if err != nil { - return nil, fmt.Errorf("failed to parse DNS resolver address: %w", err) - } - if addr == "" { - return nil, fmt.Errorf("failed to parse DNS resolver address: got empty address") - } - if net.ParseIP(addr) == nil { - return nil, fmt.Errorf("failed to parse DNS resolver address: expected IPv4/IPv6 address, likely got hostname") - } - } - } - - if eabPolicyRaw, ok := d.GetOk("eab_policy"); ok { - eabPolicy, err := getEabPolicyByString(eabPolicyRaw.(string)) - if err != nil { - return nil, fmt.Errorf("invalid eab policy name provided, valid values are '%s', '%s', '%s'", - eabPolicyNotRequired, 
eabPolicyNewAccountRequired, eabPolicyAlwaysRequired) - } - config.EabPolicyName = eabPolicy.Name - } - - // Validate Default Directory Behavior: - defaultDirectoryPolicyType, err := getDefaultDirectoryPolicyType(config.DefaultDirectoryPolicy) - if err != nil { - return nil, fmt.Errorf("invalid default_directory_policy: %w", err) - } - defaultDirectoryRoleName := "" - switch defaultDirectoryPolicyType { - case Forbid: - case SignVerbatim: - case Role: - defaultDirectoryRoleName, err = getDefaultDirectoryPolicyRole(config.DefaultDirectoryPolicy) - if err != nil { - return nil, fmt.Errorf("failed extracting role name from default directory policy %w", err) - } - - _, err := getAndValidateAcmeRole(sc, defaultDirectoryRoleName) - if err != nil { - return nil, fmt.Errorf("default directory policy role %v is not a valid ACME role: %w", defaultDirectoryRoleName, err) - } - default: - return nil, fmt.Errorf("validation for the type of policy defined by %v is undefined", config.DefaultDirectoryPolicy) - } - - // Validate Allowed Roles - allowAnyRole := len(config.AllowedRoles) == 1 && config.AllowedRoles[0] == "*" - foundDefault := false - if !allowAnyRole { - for index, name := range config.AllowedRoles { - if name == "*" { - return nil, fmt.Errorf("cannot use '*' as role name at index %d", index) - } - - _, err := getAndValidateAcmeRole(sc, name) - if err != nil { - return nil, fmt.Errorf("allowed_role %v is not a valid acme role: %w", name, err) - } - - if defaultDirectoryPolicyType == Role && name == defaultDirectoryRoleName { - foundDefault = true - } - } - - if !foundDefault && defaultDirectoryPolicyType == Role { - return nil, fmt.Errorf("default directory policy %v was not specified in allowed_roles: %v", config.DefaultDirectoryPolicy, config.AllowedRoles) - } - } - - allowAnyIssuer := len(config.AllowedIssuers) == 1 && config.AllowedIssuers[0] == "*" - if !allowAnyIssuer { - for index, name := range config.AllowedIssuers { - if name == "*" { - return nil, 
fmt.Errorf("cannot use '*' as issuer name at index %d", index) - } - - _, err := sc.resolveIssuerReference(name) - if err != nil { - return nil, fmt.Errorf("failed validating allowed_issuers: unable to fetch issuer: %v: %w", name, err) - } - } - } - - // Check to make sure that we have a proper value for the cluster path which ACME requires - if config.Enabled { - _, err = getBasePathFromClusterConfig(sc) - if err != nil { - return nil, err - } - } - - var warnings []string - // Lastly lets verify that the configuration is honored/invalidated by the public ACME env var. - isPublicAcmeDisabledByEnv, err := isPublicACMEDisabledByEnv() - if err != nil { - warnings = append(warnings, err.Error()) - } - if isPublicAcmeDisabledByEnv && config.Enabled { - eabPolicy := getEabPolicyByName(config.EabPolicyName) - if !eabPolicy.OverrideEnvDisablingPublicAcme() { - resp := logical.ErrorResponse("%s env var is enabled, ACME EAB policy needs to be '%s' with ACME enabled", - disableAcmeEnvVar, eabPolicyAlwaysRequired) - resp.Warnings = warnings - return resp, nil - } - } - - err = sc.setAcmeConfig(config) - if err != nil { - return nil, err - } - - return genResponseFromAcmeConfig(config, warnings), nil -} - -func isPublicACMEDisabledByEnv() (bool, error) { - disableAcmeRaw, ok := os.LookupEnv(disableAcmeEnvVar) - if !ok { - return false, nil - } - - disableAcme, err := strconv.ParseBool(disableAcmeRaw) - if err != nil { - // So the environment variable was set but we couldn't parse the value as a string, assume - // the operator wanted public ACME disabled. 
- return true, fmt.Errorf("failed parsing environment variable %s: %w", disableAcmeEnvVar, err) - } - - return disableAcme, nil -} - -func getDefaultDirectoryPolicyType(defaultDirectoryPolicy string) (DefaultDirectoryPolicyType, error) { - switch { - case defaultDirectoryPolicy == "forbid": - return Forbid, nil - case defaultDirectoryPolicy == "sign-verbatim": - return SignVerbatim, nil - case strings.HasPrefix(defaultDirectoryPolicy, "role:"): - if len(defaultDirectoryPolicy) == 5 { - return Forbid, fmt.Errorf("no role specified by policy %v", defaultDirectoryPolicy) - } - return Role, nil - default: - return Forbid, fmt.Errorf("string %v not a valid Default Directory Policy", defaultDirectoryPolicy) - } -} - -func getDefaultDirectoryPolicyRole(defaultDirectoryPolicy string) (string, error) { - policyType, err := getDefaultDirectoryPolicyType(defaultDirectoryPolicy) - if err != nil { - return "", err - } - if policyType != Role { - return "", fmt.Errorf("default directory policy %v is not a role-based-policy", defaultDirectoryPolicy) - } - return defaultDirectoryPolicy[5:], nil -} - -type DefaultDirectoryPolicyType int - -const ( - Forbid DefaultDirectoryPolicyType = iota - SignVerbatim - Role -) diff --git a/builtin/logical/pki/path_config_acme_test.go b/builtin/logical/pki/path_config_acme_test.go deleted file mode 100644 index 93be569d667d4..0000000000000 --- a/builtin/logical/pki/path_config_acme_test.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestAcmeConfig(t *testing.T) { - cluster, client, _ := setupAcmeBackend(t) - defer cluster.Cleanup() - - cases := []struct { - name string - AcmeConfig map[string]interface{} - prefixUrl string - validConfig bool - works bool - }{ - {"unspecified-root", map[string]interface{}{ - "enabled": true, - "allowed_issuers": "*", - "allowed_roles": "*", - "dns_resolver": "", - "eab_policy_name": "", - }, "acme/", true, true}, - {"bad-policy-root", map[string]interface{}{ - "enabled": true, - "allowed_issuers": "*", - "allowed_roles": "*", - "default_directory_policy": "bad", - "dns_resolver": "", - "eab_policy_name": "", - }, "acme/", false, false}, - {"forbid-root", map[string]interface{}{ - "enabled": true, - "allowed_issuers": "*", - "allowed_roles": "*", - "default_directory_policy": "forbid", - "dns_resolver": "", - "eab_policy_name": "", - }, "acme/", true, false}, - {"sign-verbatim-root", map[string]interface{}{ - "enabled": true, - "allowed_issuers": "*", - "allowed_roles": "*", - "default_directory_policy": "sign-verbatim", - "dns_resolver": "", - "eab_policy_name": "", - }, "acme/", true, true}, - {"role-root", map[string]interface{}{ - "enabled": true, - "allowed_issuers": "*", - "allowed_roles": "*", - "default_directory_policy": "role:exists", - "dns_resolver": "", - "eab_policy_name": "", - }, "acme/", true, true}, - {"bad-role-root", map[string]interface{}{ - "enabled": true, - "allowed_issuers": "*", - "allowed_roles": "*", - "default_directory_policy": "role:notgood", - "dns_resolver": "", - "eab_policy_name": "", - }, "acme/", false, true}, - {"disallowed-role-root", map[string]interface{}{ - "enabled": true, - "allowed_issuers": "*", - "allowed_roles": "good", - "default_directory_policy": "role:exists", - "dns_resolver": "", - "eab_policy_name": "", - }, "acme/", false, 
false}, - } - - roleConfig := map[string]interface{}{ - "issuer_ref": "default", - "allowed_domains": "example.com", - "allow_subdomains": true, - "max_ttl": "720h", - } - - testCtx := context.Background() - - for _, tc := range cases { - deadline := time.Now().Add(time.Second * 10) - subTestCtx, _ := context.WithDeadline(testCtx, deadline) - - _, err := client.Logical().WriteWithContext(subTestCtx, "pki/roles/exists", roleConfig) - require.NoError(t, err) - _, err = client.Logical().WriteWithContext(subTestCtx, "pki/roles/good", roleConfig) - require.NoError(t, err) - - t.Run(tc.name, func(t *testing.T) { - _, err := client.Logical().WriteWithContext(subTestCtx, "pki/config/acme", tc.AcmeConfig) - - if tc.validConfig { - require.NoError(t, err) - } else { - require.Error(t, err) - return - } - - _, err = client.Logical().ReadWithContext(subTestCtx, "pki/acme/directory") - if tc.works { - require.NoError(t, err) - - baseAcmeURL := "/v1/pki/" + tc.prefixUrl - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa key") - - acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) - - // Create new account - _, err = acmeClient.Discover(subTestCtx) - require.NoError(t, err, "failed acme discovery call") - } else { - require.Error(t, err, "Acme Configuration should prevent usage") - } - - t.Logf("Completed case %v", tc.name) - }) - } -} diff --git a/builtin/logical/pki/path_config_ca.go b/builtin/logical/pki/path_config_ca.go index e77386f4b1876..f7a7706455cb1 100644 --- a/builtin/logical/pki/path_config_ca.go +++ b/builtin/logical/pki/path_config_ca.go @@ -1,26 +1,16 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( "context" - "net/http" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) func pathConfigCA(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/ca", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "configure", - OperationSuffix: "ca", - }, - Fields: map[string]*framework.FieldSchema{ "pem_bundle": { Type: framework.TypeString, @@ -32,38 +22,6 @@ secret key and certificate.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathImportIssuers, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "mapping": { - Type: framework.TypeMap, - Description: "A mapping of issuer_id to key_id for all issuers included in this request", - Required: true, - }, - "imported_keys": { - Type: framework.TypeCommaStringSlice, - Description: "Net-new keys imported as a part of this request", - Required: true, - }, - "imported_issuers": { - Type: framework.TypeCommaStringSlice, - Description: "Net-new issuers imported as a part of this request", - Required: true, - }, - "existing_keys": { - Type: framework.TypeCommaStringSlice, - Description: "Existing keys specified as part of the import bundle of this request", - Required: true, - }, - "existing_issuers": { - Type: framework.TypeCommaStringSlice, - Description: "Existing issuers specified as part of the import bundle of this request", - Required: true, - }, - }, - }}, - }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -90,11 +48,6 @@ For security reasons, the secret key cannot be retrieved later. 
func pathConfigIssuers(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/issuers", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, - Fields: map[string]*framework.FieldSchema{ defaultRef: { Type: framework.TypeString, @@ -106,51 +59,13 @@ func pathConfigIssuers(b *backend) *framework.Path { Default: false, }, }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathCAIssuersRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "issuers-configuration", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "default": { - Type: framework.TypeString, - Description: `Reference (name or identifier) to the default issuer.`, - Required: true, - }, - "default_follows_latest_issuer": { - Type: framework.TypeBool, - Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, - Required: true, - }, - }, - }}, - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCAIssuersWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "issuers", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "default": { - Type: framework.TypeString, - Description: `Reference (name or identifier) to the default issuer.`, - }, - "default_follows_latest_issuer": { - Type: framework.TypeBool, - Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, - }, - }, - }}, - }, // Read more about why these flags are set in backend.go. 
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -165,13 +80,6 @@ func pathConfigIssuers(b *backend) *framework.Path { func pathReplaceRoot(b *backend) *framework.Path { return &framework.Path{ Pattern: "root/replace", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "replace", - OperationSuffix: "root", - }, - Fields: map[string]*framework.FieldSchema{ "default": { Type: framework.TypeString, @@ -183,23 +91,6 @@ func pathReplaceRoot(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCAIssuersWrite, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "default": { - Type: framework.TypeString, - Description: `Reference (name or identifier) to the default issuer.`, - Required: true, - }, - "default_follows_latest_issuer": { - Type: framework.TypeBool, - Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, - Required: true, - }, - }, - }}, - }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -254,6 +145,7 @@ func (b *backend) pathCAIssuersWrite(ctx context.Context, req *logical.Request, parsedIssuer, err := sc.resolveIssuerReference(newDefault) if err != nil { return logical.ErrorResponse("Error resolving issuer reference: " + err.Error()), nil + return nil, errutil.UserError{Err: "Error resolving issuer reference: " + err.Error()} } entry, err := sc.fetchIssuerById(parsedIssuer) if err != nil { @@ -309,11 +201,6 @@ value of the issuer with the name "next", if it exists. 
func pathConfigKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/keys", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, - Fields: map[string]*framework.FieldSchema{ defaultRef: { Type: framework.TypeString, @@ -323,42 +210,12 @@ func pathConfigKeys(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathKeyDefaultWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "keys", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "default": { - Type: framework.TypeString, - Description: `Reference (name or identifier) to the default issuer.`, - Required: true, - }, - }, - }}, - }, + Callback: b.pathKeyDefaultWrite, ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathKeyDefaultRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "keys-configuration", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "default": { - Type: framework.TypeString, - Description: `Reference (name or identifier) to the default issuer.`, - }, - }, - }}, - }, + Callback: b.pathKeyDefaultRead, ForwardPerformanceStandby: false, ForwardPerformanceSecondary: false, }, diff --git a/builtin/logical/pki/path_config_cluster.go b/builtin/logical/pki/path_config_cluster.go deleted file mode 100644 index 4bdfb820576ad..0000000000000 --- a/builtin/logical/pki/path_config_cluster.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "fmt" - "net/http" - - "github.com/asaskevich/govalidator" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathConfigCluster(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/cluster", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, - - Fields: map[string]*framework.FieldSchema{ - "path": { - Type: framework.TypeString, - Description: `Canonical URI to this mount on this performance -replication cluster's external address. This is for resolving AIA URLs and -providing the {{cluster_path}} template parameter but might be used for other -purposes in the future. - -This should only point back to this particular PR replica and should not ever -point to another PR cluster. It may point to any node in the PR replica, -including standby nodes, and need not always point to the active node. - -For example: https://pr1.vault.example.com:8200/v1/pki`, - }, - "aia_path": { - Type: framework.TypeString, - Description: `Optional URI to this mount's AIA distribution -point; may refer to an external non-Vault responder. This is for resolving AIA -URLs and providing the {{cluster_aia_path}} template parameter and will not -be used for other purposes. As such, unlike path above, this could safely -be an insecure transit mechanism (like HTTP without TLS). 
- -For example: http://cdn.example.com/pr1/pki`, - }, - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "cluster", - }, - Callback: b.pathWriteCluster, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "path": { - Type: framework.TypeString, - Description: `Canonical URI to this mount on this performance -replication cluster's external address. This is for resolving AIA URLs and -providing the {{cluster_path}} template parameter but might be used for other -purposes in the future. - -This should only point back to this particular PR replica and should not ever -point to another PR cluster. It may point to any node in the PR replica, -including standby nodes, and need not always point to the active node. - -For example: https://pr1.vault.example.com:8200/v1/pki`, - }, - "aia_path": { - Type: framework.TypeString, - Description: `Optional URI to this mount's AIA distribution -point; may refer to an external non-Vault responder. This is for resolving AIA -URLs and providing the {{cluster_aia_path}} template parameter and will not -be used for other purposes. As such, unlike path above, this could safely -be an insecure transit mechanism (like HTTP without TLS). - -For example: http://cdn.example.com/pr1/pki`, - }, - }, - }}, - }, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathReadCluster, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "cluster-configuration", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "path": { - Type: framework.TypeString, - Description: `Canonical URI to this mount on this performance -replication cluster's external address. 
This is for resolving AIA URLs and -providing the {{cluster_path}} template parameter but might be used for other -purposes in the future. - -This should only point back to this particular PR replica and should not ever -point to another PR cluster. It may point to any node in the PR replica, -including standby nodes, and need not always point to the active node. - -For example: https://pr1.vault.example.com:8200/v1/pki`, - Required: true, - }, - "aia_path": { - Type: framework.TypeString, - Description: `Optional URI to this mount's AIA distribution -point; may refer to an external non-Vault responder. This is for resolving AIA -URLs and providing the {{cluster_aia_path}} template parameter and will not -be used for other purposes. As such, unlike path above, this could safely -be an insecure transit mechanism (like HTTP without TLS). - -For example: http://cdn.example.com/pr1/pki`, - }, - }, - }}, - }, - }, - }, - - HelpSynopsis: pathConfigClusterHelpSyn, - HelpDescription: pathConfigClusterHelpDesc, - } -} - -func (b *backend) pathReadCluster(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, req.Storage) - cfg, err := sc.getClusterConfig() - if err != nil { - return nil, err - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "path": cfg.Path, - "aia_path": cfg.AIAPath, - }, - } - - return resp, nil -} - -func (b *backend) pathWriteCluster(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, req.Storage) - cfg, err := sc.getClusterConfig() - if err != nil { - return nil, err - } - - if value, ok := data.GetOk("path"); ok { - cfg.Path = value.(string) - - // This field is required by ACME, if ever we allow un-setting in the - // future, this code will need to verify that ACME is not enabled. 
- if !govalidator.IsURL(cfg.Path) { - return nil, fmt.Errorf("invalid, non-URL path given to cluster: %v", cfg.Path) - } - } - - if value, ok := data.GetOk("aia_path"); ok { - cfg.AIAPath = value.(string) - if !govalidator.IsURL(cfg.AIAPath) { - return nil, fmt.Errorf("invalid, non-URL aia_path given to cluster: %v", cfg.AIAPath) - } - } - - if err := sc.writeClusterConfig(cfg); err != nil { - return nil, err - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "path": cfg.Path, - "aia_path": cfg.AIAPath, - }, - } - - return resp, nil -} - -const pathConfigClusterHelpSyn = ` -Set cluster-local configuration, including address to this PR cluster. -` - -const pathConfigClusterHelpDesc = ` -This path allows you to set cluster-local configuration, including the -URI to this performance replication cluster. This allows you to use -templated AIA URLs with /config/urls and /issuer/:issuer_ref, setting the -reference to the cluster's URI. - -Only one address can be specified for any given cluster. -` diff --git a/builtin/logical/pki/path_config_crl.go b/builtin/logical/pki/path_config_crl.go index 0249e6f084b70..30b8047b53fc6 100644 --- a/builtin/logical/pki/path_config_crl.go +++ b/builtin/logical/pki/path_config_crl.go @@ -1,15 +1,10 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( "context" "fmt" - "net/http" "time" - "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" @@ -19,44 +14,33 @@ const latestCrlConfigVersion = 1 // CRLConfig holds basic CRL configuration information type crlConfig struct { - Version int `json:"version"` - Expiry string `json:"expiry"` - Disable bool `json:"disable"` - OcspDisable bool `json:"ocsp_disable"` - AutoRebuild bool `json:"auto_rebuild"` - AutoRebuildGracePeriod string `json:"auto_rebuild_grace_period"` - OcspExpiry string `json:"ocsp_expiry"` - EnableDelta bool `json:"enable_delta"` - DeltaRebuildInterval string `json:"delta_rebuild_interval"` - UseGlobalQueue bool `json:"cross_cluster_revocation"` - UnifiedCRL bool `json:"unified_crl"` - UnifiedCRLOnExistingPaths bool `json:"unified_crl_on_existing_paths"` + Version int `json:"version"` + Expiry string `json:"expiry"` + Disable bool `json:"disable"` + OcspDisable bool `json:"ocsp_disable"` + AutoRebuild bool `json:"auto_rebuild"` + AutoRebuildGracePeriod string `json:"auto_rebuild_grace_period"` + OcspExpiry string `json:"ocsp_expiry"` + EnableDelta bool `json:"enable_delta"` + DeltaRebuildInterval string `json:"delta_rebuild_interval"` } // Implicit default values for the config if it does not exist. 
var defaultCrlConfig = crlConfig{ - Version: latestCrlConfigVersion, - Expiry: "72h", - Disable: false, - OcspDisable: false, - OcspExpiry: "12h", - AutoRebuild: false, - AutoRebuildGracePeriod: "12h", - EnableDelta: false, - DeltaRebuildInterval: "15m", - UseGlobalQueue: false, - UnifiedCRL: false, - UnifiedCRLOnExistingPaths: false, + Version: latestCrlConfigVersion, + Expiry: "72h", + Disable: false, + OcspDisable: false, + OcspExpiry: "12h", + AutoRebuild: false, + AutoRebuildGracePeriod: "12h", + EnableDelta: false, + DeltaRebuildInterval: "15m", } func pathConfigCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/crl", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, - Fields: map[string]*framework.FieldSchema{ "expiry": { Type: framework.TypeString, @@ -96,171 +80,14 @@ the NextUpdate field); defaults to 12 hours`, Description: `The time between delta CRL rebuilds if a new revocation has occurred. Must be shorter than the CRL expiry. Defaults to 15m.`, Default: "15m", }, - "cross_cluster_revocation": { - Type: framework.TypeBool, - Description: `Whether to enable a global, cross-cluster revocation queue. -Must be used with auto_rebuild=true.`, - }, - "unified_crl": { - Type: framework.TypeBool, - Description: `If set to true enables global replication of revocation entries, -also enabling unified versions of OCSP and CRLs if their respective features are enabled. 
-disable for CRLs and ocsp_disable for OCSP.`, - Default: "false", - }, - "unified_crl_on_existing_paths": { - Type: framework.TypeBool, - Description: `If set to true, -existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data`, - Default: "false", - }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "crl-configuration", - }, Callback: b.pathCRLRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "expiry": { - Type: framework.TypeString, - Description: `The amount of time the generated CRL should be -valid; defaults to 72 hours`, - Required: true, - }, - "disable": { - Type: framework.TypeBool, - Description: `If set to true, disables generating the CRL entirely.`, - Required: true, - }, - "ocsp_disable": { - Type: framework.TypeBool, - Description: `If set to true, ocsp unauthorized responses will be returned.`, - Required: true, - }, - "ocsp_expiry": { - Type: framework.TypeString, - Description: `The amount of time an OCSP response will be valid (controls -the NextUpdate field); defaults to 12 hours`, - Required: true, - }, - "auto_rebuild": { - Type: framework.TypeBool, - Description: `If set to true, enables automatic rebuilding of the CRL`, - Required: true, - }, - "auto_rebuild_grace_period": { - Type: framework.TypeString, - Description: `The time before the CRL expires to automatically rebuild it, when enabled. Must be shorter than the CRL expiry. Defaults to 12h.`, - Required: true, - }, - "enable_delta": { - Type: framework.TypeBool, - Description: `Whether to enable delta CRLs between authoritative CRL rebuilds`, - Required: true, - }, - "delta_rebuild_interval": { - Type: framework.TypeString, - Description: `The time between delta CRL rebuilds if a new revocation has occurred. 
Must be shorter than the CRL expiry. Defaults to 15m.`, - Required: true, - }, - "cross_cluster_revocation": { - Type: framework.TypeBool, - Description: `Whether to enable a global, cross-cluster revocation queue. -Must be used with auto_rebuild=true.`, - Required: true, - }, - "unified_crl": { - Type: framework.TypeBool, - Description: `If set to true enables global replication of revocation entries, -also enabling unified versions of OCSP and CRLs if their respective features are enabled. -disable for CRLs and ocsp_disable for OCSP.`, - Required: true, - }, - "unified_crl_on_existing_paths": { - Type: framework.TypeBool, - Description: `If set to true, -existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data`, - Required: true, - }, - }, - }}, - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCRLWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "crl", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "expiry": { - Type: framework.TypeString, - Description: `The amount of time the generated CRL should be -valid; defaults to 72 hours`, - Default: "72h", - }, - "disable": { - Type: framework.TypeBool, - Description: `If set to true, disables generating the CRL entirely.`, - }, - "ocsp_disable": { - Type: framework.TypeBool, - Description: `If set to true, ocsp unauthorized responses will be returned.`, - }, - "ocsp_expiry": { - Type: framework.TypeString, - Description: `The amount of time an OCSP response will be valid (controls -the NextUpdate field); defaults to 12 hours`, - Default: "1h", - }, - "auto_rebuild": { - Type: framework.TypeBool, - Description: `If set to true, enables automatic rebuilding of the CRL`, - }, - "auto_rebuild_grace_period": { - Type: framework.TypeString, - Description: `The time before the CRL expires to automatically rebuild 
it, when enabled. Must be shorter than the CRL expiry. Defaults to 12h.`, - Default: "12h", - }, - "enable_delta": { - Type: framework.TypeBool, - Description: `Whether to enable delta CRLs between authoritative CRL rebuilds`, - }, - "delta_rebuild_interval": { - Type: framework.TypeString, - Description: `The time between delta CRL rebuilds if a new revocation has occurred. Must be shorter than the CRL expiry. Defaults to 15m.`, - Default: "15m", - }, - "cross_cluster_revocation": { - Type: framework.TypeBool, - Description: `Whether to enable a global, cross-cluster revocation queue. -Must be used with auto_rebuild=true.`, - Required: false, - }, - "unified_crl": { - Type: framework.TypeBool, - Description: `If set to true enables global replication of revocation entries, -also enabling unified versions of OCSP and CRLs if their respective features are enabled. -disable for CRLs and ocsp_disable for OCSP.`, - Required: false, - }, - "unified_crl_on_existing_paths": { - Type: framework.TypeBool, - Description: `If set to true, -existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data`, - Required: false, - }, - }, - }}, - }, // Read more about why these flags are set in backend.go. 
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -279,7 +106,18 @@ func (b *backend) pathCRLRead(ctx context.Context, req *logical.Request, _ *fram return nil, err } - return genResponseFromCrlConfig(config), nil + return &logical.Response{ + Data: map[string]interface{}{ + "expiry": config.Expiry, + "disable": config.Disable, + "ocsp_disable": config.OcspDisable, + "ocsp_expiry": config.OcspExpiry, + "auto_rebuild": config.AutoRebuild, + "auto_rebuild_grace_period": config.AutoRebuildGracePeriod, + "enable_delta": config.EnableDelta, + "delta_rebuild_interval": config.DeltaRebuildInterval, + }, + }, nil } func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { @@ -332,7 +170,6 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra config.AutoRebuildGracePeriod = autoRebuildGracePeriod } - oldEnableDelta := config.EnableDelta if enableDeltaRaw, ok := d.GetOk("enable_delta"); ok { config.EnableDelta = enableDeltaRaw.(bool) } @@ -345,23 +182,6 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra config.DeltaRebuildInterval = deltaRebuildInterval } - if useGlobalQueue, ok := d.GetOk("cross_cluster_revocation"); ok { - config.UseGlobalQueue = useGlobalQueue.(bool) - } - - oldUnifiedCRL := config.UnifiedCRL - if unifiedCrlRaw, ok := d.GetOk("unified_crl"); ok { - config.UnifiedCRL = unifiedCrlRaw.(bool) - } - - if unifiedCrlOnExistingPathsRaw, ok := d.GetOk("unified_crl_on_existing_paths"); ok { - config.UnifiedCRLOnExistingPaths = unifiedCrlOnExistingPathsRaw.(bool) - } - - if config.UnifiedCRLOnExistingPaths && !config.UnifiedCRL { - return logical.ErrorResponse("unified_crl_on_existing_paths cannot be enabled if unified_crl is disabled"), nil - } - expiry, _ := time.ParseDuration(config.Expiry) if config.AutoRebuild { gracePeriod, _ := time.ParseDuration(config.AutoRebuildGracePeriod) @@ -377,36 +197,8 @@ func (b 
*backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra } } - if !config.AutoRebuild { - if config.EnableDelta { - return logical.ErrorResponse("Delta CRLs cannot be enabled when auto rebuilding is disabled as the complete CRL is always regenerated!"), nil - } - - if config.UseGlobalQueue { - return logical.ErrorResponse("Global, cross-cluster revocation queue cannot be enabled when auto rebuilding is disabled as the local cluster may not have the certificate entry!"), nil - } - } - - if !constants.IsEnterprise && config.UseGlobalQueue { - return logical.ErrorResponse("Global, cross-cluster revocation queue (cross_cluster_revocation) can only be enabled on Vault Enterprise."), nil - } - - if !constants.IsEnterprise && config.UnifiedCRL { - return logical.ErrorResponse("unified_crl can only be enabled on Vault Enterprise"), nil - } - - isLocalMount := b.System().LocalMount() - if isLocalMount && config.UseGlobalQueue { - return logical.ErrorResponse("Global, cross-cluster revocation queue (cross_cluster_revocation) cannot be enabled on local mounts."), - nil - } - - if isLocalMount && config.UnifiedCRL { - return logical.ErrorResponse("unified_crl cannot be enabled on local mounts."), nil - } - - if !config.AutoRebuild && config.UnifiedCRL { - return logical.ErrorResponse("unified_crl=true requires auto_rebuild=true, as unified CRLs cannot be rebuilt on every revocation."), nil + if config.EnableDelta && !config.AutoRebuild { + return logical.ErrorResponse("Delta CRLs cannot be enabled when auto rebuilding is disabled as the complete CRL is always regenerated!"), nil } entry, err := logical.StorageEntryJSON("config/crl", config) @@ -421,17 +213,10 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra b.crlBuilder.markConfigDirty() b.crlBuilder.reloadConfigIfRequired(sc) - resp := genResponseFromCrlConfig(config) - - // Note this only affects/happens on the main cluster node, if you need to - // notify something based on 
a configuration change on all server types - // have a look at crlBuilder::reloadConfigIfRequired - if oldDisable != config.Disable || (oldAutoRebuild && !config.AutoRebuild) || (oldEnableDelta != config.EnableDelta) || (oldUnifiedCRL != config.UnifiedCRL) { + if oldDisable != config.Disable || (oldAutoRebuild && !config.AutoRebuild) { // It wasn't disabled but now it is (or equivalently, we were set to - // auto-rebuild and we aren't now or equivalently, we changed our - // mind about delta CRLs and need a new complete one or equivalently, - // we changed our mind about unified CRLs), rotate the CRLs. - warnings, crlErr := b.crlBuilder.rebuild(sc, true) + // auto-rebuild and we aren't now), so rotate the CRL. + crlErr := b.crlBuilder.rebuild(ctx, b, req, true) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -440,30 +225,9 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) } } - for index, warning := range warnings { - resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) - } } - return resp, nil -} - -func genResponseFromCrlConfig(config *crlConfig) *logical.Response { - return &logical.Response{ - Data: map[string]interface{}{ - "expiry": config.Expiry, - "disable": config.Disable, - "ocsp_disable": config.OcspDisable, - "ocsp_expiry": config.OcspExpiry, - "auto_rebuild": config.AutoRebuild, - "auto_rebuild_grace_period": config.AutoRebuildGracePeriod, - "enable_delta": config.EnableDelta, - "delta_rebuild_interval": config.DeltaRebuildInterval, - "cross_cluster_revocation": config.UseGlobalQueue, - "unified_crl": config.UnifiedCRL, - "unified_crl_on_existing_paths": config.UnifiedCRLOnExistingPaths, - }, - } + return nil, nil } const pathConfigCRLHelpSyn = ` diff --git a/builtin/logical/pki/path_config_urls.go b/builtin/logical/pki/path_config_urls.go index 341f3db635573..830ca34ad099d 100644 --- 
a/builtin/logical/pki/path_config_urls.go +++ b/builtin/logical/pki/path_config_urls.go @@ -1,27 +1,18 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( "context" "fmt" - "net/http" - "strings" "github.com/asaskevich/govalidator" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" ) func pathConfigURLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/urls", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, - Fields: map[string]*framework.FieldSchema{ "issuing_certificates": { Type: framework.TypeCommaStringSlice, @@ -40,98 +31,14 @@ for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, Description: `Comma-separated list of URLs to be used for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, }, - - "enable_templating": { - Type: framework.TypeBool, - Description: `Whether or not to enabling templating of the -above AIA fields. When templating is enabled the special values '{{issuer_id}}', -'{{cluster_path}}', and '{{cluster_aia_path}}' are available, but the addresses -are not checked for URI validity until issuance time. 
Using '{{cluster_path}}' -requires /config/cluster's 'path' member to be set on all PR Secondary clusters -and using '{{cluster_aia_path}}' requires /config/cluster's 'aia_path' member -to be set on all PR secondary clusters.`, - Default: false, - }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "urls", - }, Callback: b.pathWriteURL, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "issuing_certificates": { - Type: framework.TypeCommaStringSlice, - Description: `Comma-separated list of URLs to be used -for the issuing certificate attribute. See also RFC 5280 Section 4.2.2.1.`, - }, - "crl_distribution_points": { - Type: framework.TypeCommaStringSlice, - Description: `Comma-separated list of URLs to be used -for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, - }, - "ocsp_servers": { - Type: framework.TypeCommaStringSlice, - Description: `Comma-separated list of URLs to be used -for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, - }, - "enable_templating": { - Type: framework.TypeBool, - Description: `Whether or not to enabling templating of the -above AIA fields. When templating is enabled the special values '{{issuer_id}}' -and '{{cluster_path}}' are available, but the addresses are not checked for -URI validity until issuance time. 
This requires /config/cluster's path to be -set on all PR Secondary clusters.`, - Default: false, - }, - }, - }}, - }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathReadURL, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "urls-configuration", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "issuing_certificates": { - Type: framework.TypeCommaStringSlice, - Description: `Comma-separated list of URLs to be used -for the issuing certificate attribute. See also RFC 5280 Section 4.2.2.1.`, - Required: true, - }, - "crl_distribution_points": { - Type: framework.TypeCommaStringSlice, - Description: `Comma-separated list of URLs to be used -for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, - Required: true, - }, - "ocsp_servers": { - Type: framework.TypeCommaStringSlice, - Description: `Comma-separated list of URLs to be used -for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, - Required: true, - }, - "enable_templating": { - Type: framework.TypeBool, - Description: `Whether or not to enable templating of the -above AIA fields. When templating is enabled the special values '{{issuer_id}}' -and '{{cluster_path}}' are available, but the addresses are not checked for -URI validity until issuance time. 
This requires /config/cluster's path to be -set on all PR Secondary clusters.`, - Required: true, - }, - }, - }}, - }, }, }, @@ -142,7 +49,7 @@ set on all PR Secondary clusters.`, func validateURLs(urls []string) string { for _, curr := range urls { - if !govalidator.IsURL(curr) || strings.Contains(curr, "{{issuer_id}}") || strings.Contains(curr, "{{cluster_path}}") || strings.Contains(curr, "{{cluster_aia_path}}") { + if !govalidator.IsURL(curr) { return curr } } @@ -150,17 +57,16 @@ func validateURLs(urls []string) string { return "" } -func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*aiaConfigEntry, error) { +func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*certutil.URLEntries, error) { entry, err := storage.Get(ctx, "urls") if err != nil { return nil, err } - entries := &aiaConfigEntry{ + entries := &certutil.URLEntries{ IssuingCertificates: []string{}, CRLDistributionPoints: []string{}, OCSPServers: []string{}, - EnableTemplating: false, } if entry == nil { @@ -174,7 +80,7 @@ func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*aiaConfigE return entries, nil } -func writeURLs(ctx context.Context, storage logical.Storage, entries *aiaConfigEntry) error { +func writeURLs(ctx context.Context, storage logical.Storage, entries *certutil.URLEntries) error { entry, err := logical.StorageEntryJSON("urls", entries) if err != nil { return err @@ -202,7 +108,6 @@ func (b *backend) pathReadURL(ctx context.Context, req *logical.Request, _ *fram "issuing_certificates": entries.IssuingCertificates, "crl_distribution_points": entries.CRLDistributionPoints, "ocsp_servers": entries.OCSPServers, - "enable_templating": entries.EnableTemplating, }, } @@ -215,68 +120,29 @@ func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data * return nil, err } - if enableTemplating, ok := data.GetOk("enable_templating"); ok { - entries.EnableTemplating = enableTemplating.(bool) - } if urlsInt, ok := 
data.GetOk("issuing_certificates"); ok { entries.IssuingCertificates = urlsInt.([]string) - } - if urlsInt, ok := data.GetOk("crl_distribution_points"); ok { - entries.CRLDistributionPoints = urlsInt.([]string) - } - if urlsInt, ok := data.GetOk("ocsp_servers"); ok { - entries.OCSPServers = urlsInt.([]string) - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "issuing_certificates": entries.IssuingCertificates, - "crl_distribution_points": entries.CRLDistributionPoints, - "ocsp_servers": entries.OCSPServers, - "enable_templating": entries.EnableTemplating, - }, - } - - if entries.EnableTemplating && !b.useLegacyBundleCaStorage() { - sc := b.makeStorageContext(ctx, req.Storage) - issuers, err := sc.listIssuers() - if err != nil { - return nil, fmt.Errorf("unable to read issuers list to validate templated URIs: %w", err) - } - - if len(issuers) > 0 { - issuer, err := sc.fetchIssuerById(issuers[0]) - if err != nil { - return nil, fmt.Errorf("unable to read issuer to validate templated URIs: %w", err) - } - - _, err = entries.toURLEntries(sc, issuer.ID) - if err != nil { - resp.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", err)) - } - } - } else if !entries.EnableTemplating { if badURL := validateURLs(entries.IssuingCertificates); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( "invalid URL found in Authority Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil } - + } + if urlsInt, ok := data.GetOk("crl_distribution_points"); ok { + entries.CRLDistributionPoints = urlsInt.([]string) if badURL := validateURLs(entries.CRLDistributionPoints); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( "invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil } - + } + if urlsInt, ok := data.GetOk("ocsp_servers"); ok { + entries.OCSPServers = urlsInt.([]string) if badURL := 
validateURLs(entries.OCSPServers); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( "invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil } } - if err := writeURLs(ctx, req.Storage, entries); err != nil { - return nil, err - } - - return resp, nil + return nil, writeURLs(ctx, req.Storage, entries) } const pathConfigURLsHelpSyn = ` diff --git a/builtin/logical/pki/path_fetch.go b/builtin/logical/pki/path_fetch.go index 1798e705db88d..e0a1079eade76 100644 --- a/builtin/logical/pki/path_fetch.go +++ b/builtin/logical/pki/path_fetch.go @@ -1,70 +1,24 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( "context" "encoding/pem" "fmt" - "net/http" "strings" - "time" - - "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) -var pathFetchReadSchema = map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "certificate": { - Type: framework.TypeString, - Description: `Certificate`, - Required: false, - }, - "revocation_time": { - Type: framework.TypeString, - Description: `Revocation time`, - Required: false, - }, - "revocation_time_rfc3339": { - Type: framework.TypeString, - Description: `Revocation time RFC 3339 formatted`, - Required: false, - }, - "issuer_id": { - Type: framework.TypeString, - Description: `ID of the issuer`, - Required: false, - }, - "ca_chain": { - Type: framework.TypeStringSlice, - Description: `Issuing CA Chain`, - Required: false, - }, - }, - }}, -} - // Returns the CA in raw format func pathFetchCA(b *backend) *framework.Path { return &framework.Path{ Pattern: `ca(/pem)?`, - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "ca-der|ca-pem", - }, - Operations: map[logical.Operation]framework.OperationHandler{ 
logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, - Responses: pathFetchReadSchema, + Callback: b.pathFetchRead, }, }, @@ -78,15 +32,9 @@ func pathFetchCAChain(b *backend) *framework.Path { return &framework.Path{ Pattern: `(cert/)?ca_chain`, - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "ca-chain-pem|cert-ca-chain", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, - Responses: pathFetchReadSchema, + Callback: b.pathFetchRead, }, }, @@ -100,33 +48,6 @@ func pathFetchCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `crl(/pem|/delta(/pem)?)?`, - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "crl-der|crl-pem|crl-delta|crl-delta-pem", - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, - Responses: pathFetchReadSchema, - }, - }, - - HelpSynopsis: pathFetchHelpSyn, - HelpDescription: pathFetchHelpDesc, - } -} - -// Returns the CRL in raw format -func pathFetchUnifiedCRL(b *backend) *framework.Path { - return &framework.Path{ - Pattern: `unified-crl(/pem|/delta(/pem)?)?`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "unified-crl-der|unified-crl-pem|unified-crl-delta|unified-crl-delta-pem", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathFetchRead, @@ -142,12 +63,6 @@ func pathFetchUnifiedCRL(b *backend) *framework.Path { func pathFetchValidRaw(b *backend) *framework.Path { return &framework.Path{ Pattern: `cert/(?P[0-9A-Fa-f-:]+)/raw(/pem)?`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "cert-raw-der|cert-raw-pem", - 
}, - Fields: map[string]*framework.FieldSchema{ "serial": { Type: framework.TypeString, @@ -158,8 +73,7 @@ hyphen-separated octal`, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, - Responses: pathFetchReadSchema, + Callback: b.pathFetchRead, }, }, @@ -173,12 +87,6 @@ hyphen-separated octal`, func pathFetchValid(b *backend) *framework.Path { return &framework.Path{ Pattern: `cert/(?P[0-9A-Fa-f-:]+)`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "cert", - }, - Fields: map[string]*framework.FieldSchema{ "serial": { Type: framework.TypeString, @@ -189,8 +97,7 @@ hyphen-separated octal`, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, - Responses: pathFetchReadSchema, + Callback: b.pathFetchRead, }, }, @@ -201,23 +108,12 @@ hyphen-separated octal`, // This returns the CRL in a non-raw format func pathFetchCRLViaCertPath(b *backend) *framework.Path { - pattern := `cert/(crl|delta-crl)` - if constants.IsEnterprise { - pattern = `cert/(crl|delta-crl|unified-crl|unified-delta-crl)` - } - return &framework.Path{ - Pattern: pattern, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "cert-crl|cert-delta-crl|cert-unified-crl|cert-unified-delta-crl", - }, + Pattern: `cert/(crl|delta-crl)`, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, - Responses: pathFetchReadSchema, + Callback: b.pathFetchRead, }, }, @@ -231,26 +127,9 @@ func pathFetchListCerts(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "certs", - }, - Operations: 
map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathFetchCertList, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Description: `A list of keys`, - Required: true, - }, - }, - }}, - }, }, }, @@ -277,9 +156,6 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data var certificate []byte var fullChain []byte var revocationTime int64 - var revocationIssuerId string - var revocationTimeRfc3339 string - response = &logical.Response{ Data: map[string]interface{}{}, } @@ -317,30 +193,11 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data if req.Path == "ca_chain" { contentType = "application/pkix-cert" } - case req.Path == "crl" || req.Path == "crl/pem" || req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/crl" || req.Path == "cert/crl/raw" || req.Path == "cert/crl/raw/pem" || req.Path == "cert/delta-crl" || req.Path == "cert/delta-crl/raw" || req.Path == "cert/delta-crl/raw/pem" || req.Path == "unified-crl" || req.Path == "unified-crl/pem" || req.Path == "unified-crl/delta" || req.Path == "unified-crl/delta/pem" || req.Path == "cert/unified-crl" || req.Path == "cert/unified-crl/raw" || req.Path == "cert/unified-crl/raw/pem" || req.Path == "cert/unified-delta-crl" || req.Path == "cert/unified-delta-crl/raw" || req.Path == "cert/unified-delta-crl/raw/pem": - config, err := b.crlBuilder.getConfigWithUpdate(sc) - if err != nil { - retErr = err - goto reply - } - var isDelta bool - var isUnified bool - if strings.Contains(req.Path, "delta") { - isDelta = true - } - if strings.Contains(req.Path, "unified") || shouldLocalPathsUseUnified(config) { - isUnified = true - } - + case req.Path == "crl" || req.Path == "crl/pem" || req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/crl" || 
req.Path == "cert/crl/raw" || req.Path == "cert/crl/raw/pem" || req.Path == "cert/delta-crl": modifiedCtx.reqType = ifModifiedCRL - if !isUnified && isDelta { + if strings.Contains(req.Path, "delta") { modifiedCtx.reqType = ifModifiedDeltaCRL - } else if isUnified && !isDelta { - modifiedCtx.reqType = ifModifiedUnifiedCRL - } else if isUnified && isDelta { - modifiedCtx.reqType = ifModifiedUnifiedDeltaCRL } - ret, err := sendNotModifiedResponseIfNecessary(modifiedCtx, sc, response) if err != nil || ret { retErr = err @@ -348,19 +205,14 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data } serial = legacyCRLPath - if !isUnified && isDelta { + if req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/delta-crl" { serial = deltaCRLPath - } else if isUnified && !isDelta { - serial = unifiedCRLPath - } else if isUnified && isDelta { - serial = unifiedDeltaCRLPath } - contentType = "application/pkix-crl" - if strings.Contains(req.Path, "pem") { + if req.Path == "crl/pem" || req.Path == "crl/delta/pem" { pemType = "X509 CRL" contentType = "application/x-pem-file" - } else if req.Path == "cert/crl" || req.Path == "cert/delta-crl" || req.Path == "cert/unified-crl" || req.Path == "cert/unified-delta-crl" { + } else if req.Path == "cert/crl" || req.Path == "cert/delta-crl" { pemType = "X509 CRL" contentType = "" } @@ -424,7 +276,7 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data goto reply } - certEntry, funcErr = fetchCertBySerial(sc, req.Path, serial) + certEntry, funcErr = fetchCertBySerial(ctx, b, req, req.Path, serial) if funcErr != nil { switch funcErr.(type) { case errutil.UserError: @@ -452,7 +304,7 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data certificate = []byte(strings.TrimSpace(string(pem.EncodeToMemory(&block)))) } - revokedEntry, funcErr = fetchCertBySerial(sc, "revoked/", serial) + revokedEntry, funcErr = fetchCertBySerial(ctx, b, req, 
"revoked/", serial) if funcErr != nil { switch funcErr.(type) { case errutil.UserError: @@ -470,11 +322,6 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data return logical.ErrorResponse(fmt.Sprintf("Error decoding revocation entry for serial %s: %s", serial, err)), nil } revocationTime = revInfo.RevocationTime - revocationIssuerId = revInfo.CertificateIssuer.String() - - if !revInfo.RevocationTimeUTC.IsZero() { - revocationTimeRfc3339 = revInfo.RevocationTimeUTC.Format(time.RFC3339Nano) - } } reply: @@ -507,12 +354,6 @@ reply: default: response.Data["certificate"] = string(certificate) response.Data["revocation_time"] = revocationTime - response.Data["revocation_time_rfc3339"] = revocationTimeRfc3339 - // Only output this field if we have a value for it as it doesn't make sense for a - // bunch of code paths that go through here - if revocationIssuerId != "" { - response.Data["issuer_id"] = revocationIssuerId - } if len(fullChain) > 0 { response.Data["ca_chain"] = string(fullChain) diff --git a/builtin/logical/pki/path_fetch_issuers.go b/builtin/logical/pki/path_fetch_issuers.go index 48596bb5849c8..eeea1778e3dd6 100644 --- a/builtin/logical/pki/path_fetch_issuers.go +++ b/builtin/logical/pki/path_fetch_issuers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -8,7 +5,6 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "net/http" "strings" "time" @@ -21,31 +17,9 @@ func pathListIssuers(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuers/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "issuers", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathListIssuersHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Description: `A list of keys`, - Required: true, - }, - "key_info": { - Type: framework.TypeMap, - Description: `Key info with issuer name`, - Required: false, - }, - }, - }}, - }, }, }, @@ -84,28 +58,8 @@ func (b *backend) pathListIssuersHandler(ctx context.Context, req *logical.Reque responseKeys = append(responseKeys, string(identifier)) responseInfo[string(identifier)] = map[string]interface{}{ - "issuer_name": issuer.Name, - "is_default": identifier == config.DefaultIssuerId, - "serial_number": issuer.SerialNumber, - - // While nominally this could be considered sensitive information - // to be returned on an unauthed endpoint, there's two mitigating - // circumstances: - // - // 1. Key IDs are purely random numbers generated by Vault and - // have no relationship to the actual key material. - // 2. They also don't _do_ anything by themselves. There is no - // modification of KeyIDs allowed, you need to be authenticated - // to Vault to understand what they mean, you _essentially_ - // get the same information from looking at/comparing various - // cert's SubjectPublicKeyInfo field, and there's the `default` - // reference that anyone with issuer generation capabilities - // can use even if they can't access any of the other /key/* - // endpoints. 
- // - // So all in all, exposing this value is not a security risk and - // is otherwise beneficial for the UI, hence its inclusion. - "key_id": issuer.KeyID, + "issuer_name": issuer.Name, + "is_default": identifier == config.DefaultIssuerId, } } @@ -122,27 +76,15 @@ their identifier and their name (if set). func pathGetIssuer(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "$" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "issuer", - } - - return buildPathIssuer(b, pattern, displayAttrs) + return buildPathIssuer(b, pattern) } func pathGetUnauthedIssuer(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/(json|der|pem)$" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "issuer-json|issuer-der|issuer-pem", - } - - return buildPathGetIssuer(b, pattern, displayAttrs) + return buildPathGetIssuer(b, pattern) } -func buildPathIssuer(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathIssuer(b *backend, pattern string) *framework.Path { fields := map[string]*framework.FieldSchema{} fields = addIssuerRefNameFields(fields) @@ -197,137 +139,30 @@ for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, Description: `Comma-separated list of URLs to be used for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, } - fields["enable_aia_url_templating"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Whether or not to enabling templating of the -above AIA fields. When templating is enabled the special values '{{issuer_id}}', -'{{cluster_path}}', '{{cluster_aia_path}}' are available, but the addresses are -not checked for URL validity until issuance time. 
Using '{{cluster_path}}' -requires /config/cluster's 'path' member to be set on all PR Secondary clusters -and using '{{cluster_aia_path}}' requires /config/cluster's 'aia_path' member -to be set on all PR secondary clusters.`, - Default: false, - } - - updateIssuerSchema := map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "issuer_id": { - Type: framework.TypeString, - Description: `Issuer Id`, - Required: false, - }, - "issuer_name": { - Type: framework.TypeString, - Description: `Issuer Name`, - Required: false, - }, - "key_id": { - Type: framework.TypeString, - Description: `Key Id`, - Required: false, - }, - "certificate": { - Type: framework.TypeString, - Description: `Certificate`, - Required: false, - }, - "manual_chain": { - Type: framework.TypeStringSlice, - Description: `Manual Chain`, - Required: false, - }, - "ca_chain": { - Type: framework.TypeStringSlice, - Description: `CA Chain`, - Required: false, - }, - "leaf_not_after_behavior": { - Type: framework.TypeString, - Description: `Leaf Not After Behavior`, - Required: false, - }, - "usage": { - Type: framework.TypeStringSlice, - Description: `Usage`, - Required: false, - }, - "revocation_signature_algorithm": { - Type: framework.TypeString, - Description: `Revocation Signature Alogrithm`, - Required: false, - }, - "revoked": { - Type: framework.TypeBool, - Description: `Revoked`, - Required: false, - }, - "revocation_time": { - Type: framework.TypeInt, - Required: false, - }, - "revocation_time_rfc3339": { - Type: framework.TypeString, - Required: false, - }, - "issuing_certificates": { - Type: framework.TypeStringSlice, - Description: `Issuing Certificates`, - Required: false, - }, - "crl_distribution_points": { - Type: framework.TypeStringSlice, - Description: `CRL Distribution Points`, - Required: false, - }, - "ocsp_servers": { - Type: framework.TypeStringSlice, - Description: `OSCP Servers`, - Required: false, - }, - 
"enable_aia_url_templating": { - Type: framework.TypeBool, - Description: `Whether or not templating is enabled for AIA fields`, - Required: false, - }, - }, - }}, - } return &framework.Path{ // Returns a JSON entry. - Pattern: pattern, - DisplayAttrs: displayAttrs, - Fields: fields, + Pattern: pattern, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathGetIssuer, - Responses: updateIssuerSchema, + Callback: b.pathGetIssuer, }, logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathUpdateIssuer, - Responses: updateIssuerSchema, - + Callback: b.pathUpdateIssuer, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathDeleteIssuer, - Responses: map[int][]framework.Response{ - http.StatusNoContent: {{ - Description: "No Content", - }}, - }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.PatchOperation: &framework.PathOperation{ - Callback: b.pathPatchIssuer, - Responses: updateIssuerSchema, + Callback: b.pathPatchIssuer, // Read more about why these flags are set in backend.go. 
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -339,51 +174,18 @@ to be set on all PR secondary clusters.`, } } -func buildPathGetIssuer(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathGetIssuer(b *backend, pattern string) *framework.Path { fields := map[string]*framework.FieldSchema{} fields = addIssuerRefField(fields) - getIssuerSchema := map[int][]framework.Response{ - http.StatusNotModified: {{ - Description: "Not Modified", - }}, - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "issuer_id": { - Type: framework.TypeString, - Description: `Issuer Id`, - Required: true, - }, - "issuer_name": { - Type: framework.TypeString, - Description: `Issuer Name`, - Required: true, - }, - "certificate": { - Type: framework.TypeString, - Description: `Certificate`, - Required: true, - }, - "ca_chain": { - Type: framework.TypeStringSlice, - Description: `CA Chain`, - Required: true, - }, - }, - }}, - } - return &framework.Path{ // Returns a JSON entry. 
- Pattern: pattern, - DisplayAttrs: displayAttrs, - Fields: fields, + Pattern: pattern, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathGetIssuer, - Responses: getIssuerSchema, + Callback: b.pathGetIssuer, }, }, @@ -463,7 +265,6 @@ func respondReadIssuer(issuer *issuerEntry) (*logical.Response, error) { data["issuing_certificates"] = issuer.AIAURIs.IssuingCertificates data["crl_distribution_points"] = issuer.AIAURIs.CRLDistributionPoints data["ocsp_servers"] = issuer.AIAURIs.OCSPServers - data["enable_aia_url_templating"] = issuer.AIAURIs.EnableTemplating } response := &logical.Response{ @@ -560,17 +361,16 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da } // AIA access changes - enableTemplating := data.Get("enable_aia_url_templating").(bool) issuerCertificates := data.Get("issuing_certificates").([]string) - if badURL := validateURLs(issuerCertificates); !enableTemplating && badURL != "" { + if badURL := validateURLs(issuerCertificates); badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil } crlDistributionPoints := data.Get("crl_distribution_points").([]string) - if badURL := validateURLs(crlDistributionPoints); !enableTemplating && badURL != "" { + if badURL := validateURLs(crlDistributionPoints); badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil } ocspServers := data.Get("ocsp_servers").([]string) - if badURL := validateURLs(ocspServers); !enableTemplating && badURL != "" { + if badURL := validateURLs(ocspServers); badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil } @@ -602,7 +402,7 @@ func (b 
*backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da // cert itself. cert, err := issuer.GetCertificate() if err != nil { - return nil, fmt.Errorf("unable to parse issuer's certificate: %w", err) + return nil, fmt.Errorf("unable to parse issuer's certificate: %v", err) } if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) { return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil @@ -618,7 +418,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da } if issuer.AIAURIs == nil && (len(issuerCertificates) > 0 || len(crlDistributionPoints) > 0 || len(ocspServers) > 0) { - issuer.AIAURIs = &aiaConfigEntry{} + issuer.AIAURIs = &certutil.URLEntries{} } if issuer.AIAURIs != nil { // Associative mapping from data source to destination on the @@ -649,10 +449,6 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da modified = true } } - if enableTemplating != issuer.AIAURIs.EnableTemplating { - issuer.AIAURIs.EnableTemplating = enableTemplating - modified = true - } // If no AIA URLs exist on the issuer, set the AIA URLs entry to nil // to ease usage later. 
@@ -714,12 +510,6 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da if newName != oldName { addWarningOnDereferencing(sc, oldName, response) } - if issuer.AIAURIs != nil && issuer.AIAURIs.EnableTemplating { - _, aiaErr := issuer.AIAURIs.toURLEntries(sc, issuer.ID) - if aiaErr != nil { - response.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", aiaErr)) - } - } return response, err } @@ -825,7 +615,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat cert, err := issuer.GetCertificate() if err != nil { - return nil, fmt.Errorf("unable to parse issuer's certificate: %w", err) + return nil, fmt.Errorf("unable to parse issuer's certificate: %v", err) } if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) { return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil @@ -864,7 +654,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat // AIA access changes. if issuer.AIAURIs == nil { - issuer.AIAURIs = &aiaConfigEntry{} + issuer.AIAURIs = &certutil.URLEntries{} } // Associative mapping from data source to destination on the @@ -890,20 +680,12 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat }, } - if enableTemplatingRaw, ok := data.GetOk("enable_aia_url_templating"); ok { - enableTemplating := enableTemplatingRaw.(bool) - if enableTemplating != issuer.AIAURIs.EnableTemplating { - issuer.AIAURIs.EnableTemplating = true - modified = true - } - } - // For each pair, if it is different on the object, update it. 
for _, pair := range pairs { rawURLsValue, ok := data.GetOk(pair.Source) if ok { urlsValue := rawURLsValue.([]string) - if badURL := validateURLs(urlsValue); !issuer.AIAURIs.EnableTemplating && badURL != "" { + if badURL := validateURLs(urlsValue); badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter %v: %s", pair.Source, badURL)), nil } @@ -975,12 +757,6 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat if newName != oldName { addWarningOnDereferencing(sc, oldName, response) } - if issuer.AIAURIs != nil && issuer.AIAURIs.EnableTemplating { - _, aiaErr := issuer.AIAURIs.toURLEntries(sc, issuer.ID) - if aiaErr != nil { - response.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", aiaErr)) - } - } return response, err } @@ -1056,8 +832,6 @@ func (b *backend) pathGetRawIssuer(ctx context.Context, req *logical.Request, da Data: map[string]interface{}{ "certificate": string(certificate), "ca_chain": issuer.CAChain, - "issuer_id": issuer.ID, - "issuer_name": issuer.Name, }, }, nil } @@ -1155,50 +929,21 @@ the certificate. func pathGetIssuerCRL(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/crl(/pem|/der|/delta(/pem|/der)?)?" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationSuffix: "crl|crl-pem|crl-der|crl-delta|crl-delta-pem|crl-delta-der", - } - - return buildPathGetIssuerCRL(b, pattern, displayAttrs) -} - -func pathGetIssuerUnifiedCRL(b *backend) *framework.Path { - pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/unified-crl(/pem|/der|/delta(/pem|/der)?)?" 
- - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationSuffix: "unified-crl|unified-crl-pem|unified-crl-der|unified-crl-delta|unified-crl-delta-pem|unified-crl-delta-der", - } - - return buildPathGetIssuerCRL(b, pattern, displayAttrs) + return buildPathGetIssuerCRL(b, pattern) } -func buildPathGetIssuerCRL(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathGetIssuerCRL(b *backend, pattern string) *framework.Path { fields := map[string]*framework.FieldSchema{} fields = addIssuerRefNameFields(fields) return &framework.Path{ // Returns raw values. - Pattern: pattern, - DisplayAttrs: displayAttrs, - Fields: fields, + Pattern: pattern, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathGetIssuerCRL, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "crl": { - Type: framework.TypeString, - Required: false, - }, - }, - }}, - }, }, }, @@ -1217,39 +962,19 @@ func (b *backend) pathGetIssuerCRL(ctx context.Context, req *logical.Request, da return logical.ErrorResponse("missing issuer reference"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - warnings, err := b.crlBuilder.rebuildIfForced(sc) - if err != nil { + if err := b.crlBuilder.rebuildIfForced(ctx, b, req); err != nil { return nil, err } - if len(warnings) > 0 { - // Since this is a fetch of a specific CRL, this most likely comes - // from an automated system of some sort; these warnings would be - // ignored and likely meaningless. Log them instead. - msg := "During rebuild of CRL on issuer CRL fetch, got the following warnings:" - for index, warning := range warnings { - msg = fmt.Sprintf("%v\n %d. 
%v", msg, index+1, warning) - } - b.Logger().Warn(msg) - } var certificate []byte var contentType string - isUnified := strings.Contains(req.Path, "unified") - isDelta := strings.Contains(req.Path, "delta") - + sc := b.makeStorageContext(ctx, req.Storage) response := &logical.Response{} var crlType ifModifiedReqType = ifModifiedCRL - - if !isUnified && isDelta { + if strings.Contains(req.Path, "delta") { crlType = ifModifiedDeltaCRL - } else if isUnified && !isDelta { - crlType = ifModifiedUnifiedCRL - } else if isUnified && isDelta { - crlType = ifModifiedUnifiedDeltaCRL } - ret, err := sendNotModifiedResponseIfNecessary(&IfModifiedSinceHelper{req: req, reqType: crlType}, sc, response) if err != nil { return nil, err @@ -1257,8 +982,7 @@ func (b *backend) pathGetIssuerCRL(ctx context.Context, req *logical.Request, da if ret { return response, nil } - - crlPath, err := sc.resolveIssuerCRLPath(issuerName, isUnified) + crlPath, err := sc.resolveIssuerCRLPath(issuerName) if err != nil { return nil, err } diff --git a/builtin/logical/pki/path_fetch_keys.go b/builtin/logical/pki/path_fetch_keys.go index 23b3bf58dcb22..d0a01cb429b0e 100644 --- a/builtin/logical/pki/path_fetch_keys.go +++ b/builtin/logical/pki/path_fetch_keys.go @@ -1,13 +1,9 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( "context" "crypto" "fmt" - "net/http" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -20,31 +16,9 @@ func pathListKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "keys", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ - Callback: b.pathListKeysHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Description: `A list of keys`, - Required: true, - }, - "key_info": { - Type: framework.TypeMap, - Description: `Key info with issuer name`, - Required: false, - }, - }, - }}, - }, + Callback: b.pathListKeysHandler, ForwardPerformanceStandby: false, ForwardPerformanceSecondary: false, }, @@ -98,19 +72,12 @@ func (b *backend) pathListKeysHandler(ctx context.Context, req *logical.Request, func pathKey(b *backend) *framework.Path { pattern := "key/" + framework.GenericNameRegex(keyRefParam) - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "key", - } - - return buildPathKey(b, pattern, displayAttrs) + return buildPathKey(b, pattern) } -func buildPathKey(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathKey(b *backend, pattern string) *framework.Path { return &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, + Pattern: pattern, Fields: map[string]*framework.FieldSchema{ keyRefParam: { @@ -126,81 +93,17 @@ func buildPathKey(b *backend, pattern string, displayAttrs *framework.DisplayAtt Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: 
b.pathGetKeyHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "key_id": { - Type: framework.TypeString, - Description: `Key Id`, - Required: true, - }, - "key_name": { - Type: framework.TypeString, - Description: `Key Name`, - Required: true, - }, - "key_type": { - Type: framework.TypeString, - Description: `Key Type`, - Required: true, - }, - "subject_key_id": { - Type: framework.TypeString, - Description: `RFC 5280 Subject Key Identifier of the public counterpart`, - Required: false, - }, - "managed_key_id": { - Type: framework.TypeString, - Description: `Managed Key Id`, - Required: false, - }, - "managed_key_name": { - Type: framework.TypeString, - Description: `Managed Key Name`, - Required: false, - }, - }, - }}, - }, + Callback: b.pathGetKeyHandler, ForwardPerformanceStandby: false, ForwardPerformanceSecondary: false, }, logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathUpdateKeyHandler, - Responses: map[int][]framework.Response{ - http.StatusNoContent: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "key_id": { - Type: framework.TypeString, - Description: `Key Id`, - Required: true, - }, - "key_name": { - Type: framework.TypeString, - Description: `Key Name`, - Required: true, - }, - "key_type": { - Type: framework.TypeString, - Description: `Key Type`, - Required: true, - }, - }, - }}, - }, + Callback: b.pathUpdateKeyHandler, ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathDeleteKeyHandler, - Responses: map[int][]framework.Response{ - http.StatusNoContent: {{ - Description: "No Content", - }}, - }, + Callback: b.pathDeleteKeyHandler, ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, diff --git a/builtin/logical/pki/path_intermediate.go b/builtin/logical/pki/path_intermediate.go index 1a685630e63bb..cfcff87b04f37 
100644 --- a/builtin/logical/pki/path_intermediate.go +++ b/builtin/logical/pki/path_intermediate.go @@ -1,13 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( "context" "encoding/base64" "fmt" - "net/http" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -15,27 +11,13 @@ import ( ) func pathGenerateIntermediate(b *backend) *framework.Path { - pattern := "intermediate/generate/" + framework.GenericNameRegex("exported") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "generate", - OperationSuffix: "intermediate", - } - - return buildPathGenerateIntermediate(b, pattern, displayAttrs) + return buildPathGenerateIntermediate(b, "intermediate/generate/"+framework.GenericNameRegex("exported")) } func pathSetSignedIntermediate(b *backend) *framework.Path { ret := &framework.Path{ Pattern: "intermediate/set-signed", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "set-signed", - OperationSuffix: "intermediate", - }, - Fields: map[string]*framework.FieldSchema{ "certificate": { Type: framework.TypeString, @@ -49,38 +31,6 @@ appended to the bundle.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathImportIssuers, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "mapping": { - Type: framework.TypeMap, - Description: "A mapping of issuer_id to key_id for all issuers included in this request", - Required: true, - }, - "imported_keys": { - Type: framework.TypeCommaStringSlice, - Description: "Net-new keys imported as a part of this request", - Required: true, - }, - "imported_issuers": { - Type: framework.TypeCommaStringSlice, - Description: "Net-new issuers imported as a part of this request", - Required: true, - 
}, - "existing_keys": { - Type: framework.TypeCommaStringSlice, - Description: "Existing keys specified as part of the import bundle of this request", - Required: true, - }, - "existing_issuers": { - Type: framework.TypeCommaStringSlice, - Description: "Existing issuers specified as part of the import bundle of this request", - Required: true, - }, - }, - }}, - }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go index 388e80cd21a78..7203d56c73cf0 100644 --- a/builtin/logical/pki/path_issue_sign.go +++ b/builtin/logical/pki/path_issue_sign.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -10,7 +7,6 @@ import ( "encoding/base64" "encoding/pem" "fmt" - "net/http" "strings" "time" @@ -23,78 +19,21 @@ import ( func pathIssue(b *backend) *framework.Path { pattern := "issue/" + framework.GenericNameRegex("role") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "issue", - OperationSuffix: "with-role", - } - - return buildPathIssue(b, pattern, displayAttrs) + return buildPathIssue(b, pattern) } func pathIssuerIssue(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/issue/" + framework.GenericNameRegex("role") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationVerb: "issue", - OperationSuffix: "with-role", - } - - return buildPathIssue(b, pattern, displayAttrs) + return buildPathIssue(b, pattern) } -func buildPathIssue(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathIssue(b *backend, pattern string) *framework.Path { ret := &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, + Pattern: pattern, Operations: 
map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.metricsWrap("issue", roleRequired, b.pathIssue), - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "certificate": { - Type: framework.TypeString, - Description: `Certificate`, - Required: true, - }, - "issuing_ca": { - Type: framework.TypeString, - Description: `Issuing Certificate Authority`, - Required: true, - }, - "ca_chain": { - Type: framework.TypeCommaStringSlice, - Description: `Certificate Chain`, - Required: false, - }, - "serial_number": { - Type: framework.TypeString, - Description: `Serial Number`, - Required: false, - }, - "expiration": { - Type: framework.TypeString, - Description: `Time of expiration`, - Required: false, - }, - "private_key": { - Type: framework.TypeString, - Description: `Private key`, - Required: false, - }, - "private_key_type": { - Type: framework.TypeString, - Description: `Private key type`, - Required: false, - }, - }, - }}, - }, }, }, @@ -108,78 +47,21 @@ func buildPathIssue(b *backend, pattern string, displayAttrs *framework.DisplayA func pathSign(b *backend) *framework.Path { pattern := "sign/" + framework.GenericNameRegex("role") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "sign", - OperationSuffix: "with-role", - } - - return buildPathSign(b, pattern, displayAttrs) + return buildPathSign(b, pattern) } func pathIssuerSign(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign/" + framework.GenericNameRegex("role") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationVerb: "sign", - OperationSuffix: "with-role", - } - - return buildPathSign(b, pattern, displayAttrs) + return buildPathSign(b, pattern) } -func buildPathSign(b *backend, pattern string, displayAttrs 
*framework.DisplayAttributes) *framework.Path { +func buildPathSign(b *backend, pattern string) *framework.Path { ret := &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, + Pattern: pattern, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.metricsWrap("sign", roleRequired, b.pathSign), - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "certificate": { - Type: framework.TypeString, - Description: `Certificate`, - Required: true, - }, - "issuing_ca": { - Type: framework.TypeString, - Description: `Issuing Certificate Authority`, - Required: true, - }, - "ca_chain": { - Type: framework.TypeCommaStringSlice, - Description: `Certificate Chain`, - Required: false, - }, - "serial_number": { - Type: framework.TypeString, - Description: `Serial Number`, - Required: true, - }, - "expiration": { - Type: framework.TypeString, - Description: `Time of expiration`, - Required: true, - }, - "private_key": { - Type: framework.TypeString, - Description: `Private key`, - Required: false, - }, - "private_key_type": { - Type: framework.TypeString, - Description: `Private key type`, - Required: false, - }, - }, - }}, - }, }, }, @@ -200,79 +82,22 @@ func buildPathSign(b *backend, pattern string, displayAttrs *framework.DisplayAt func pathIssuerSignVerbatim(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-verbatim" + framework.OptionalParamRegex("role") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationVerb: "sign", - OperationSuffix: "verbatim|verbatim-with-role", - } - - return buildPathIssuerSignVerbatim(b, pattern, displayAttrs) + return buildPathIssuerSignVerbatim(b, pattern) } func pathSignVerbatim(b *backend) *framework.Path { pattern := "sign-verbatim" + framework.OptionalParamRegex("role") - - 
displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "sign", - OperationSuffix: "verbatim|verbatim-with-role", - } - - return buildPathIssuerSignVerbatim(b, pattern, displayAttrs) + return buildPathIssuerSignVerbatim(b, pattern) } -func buildPathIssuerSignVerbatim(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathIssuerSignVerbatim(b *backend, pattern string) *framework.Path { ret := &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, - Fields: getCsrSignVerbatimSchemaFields(), + Pattern: pattern, + Fields: map[string]*framework.FieldSchema{}, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.metricsWrap("sign-verbatim", roleOptional, b.pathSignVerbatim), - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "certificate": { - Type: framework.TypeString, - Description: `Certificate`, - Required: true, - }, - "issuing_ca": { - Type: framework.TypeString, - Description: `Issuing Certificate Authority`, - Required: true, - }, - "ca_chain": { - Type: framework.TypeCommaStringSlice, - Description: `Certificate Chain`, - Required: false, - }, - "serial_number": { - Type: framework.TypeString, - Description: `Serial Number`, - Required: false, - }, - "expiration": { - Type: framework.TypeString, - Description: `Time of expiration`, - Required: false, - }, - "private_key": { - Type: framework.TypeString, - Description: `Private key`, - Required: false, - }, - "private_key_type": { - Type: framework.TypeString, - Description: `Private key type`, - Required: false, - }, - }, - }}, - }, }, }, @@ -280,6 +105,61 @@ func buildPathIssuerSignVerbatim(b *backend, pattern string, displayAttrs *frame HelpDescription: pathIssuerSignVerbatimHelpDesc, } + ret.Fields = addNonCACommonFields(ret.Fields) + + ret.Fields["csr"] 
= &framework.FieldSchema{ + Type: framework.TypeString, + Default: "", + Description: `PEM-format CSR to be signed. Values will be +taken verbatim from the CSR, except for +basic constraints.`, + } + + ret.Fields["key_usage"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Default: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, + Description: `A comma-separated string or list of key usages (not extended +key usages). Valid values can be found at +https://golang.org/pkg/crypto/x509/#KeyUsage +-- simply drop the "KeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list.`, + } + + ret.Fields["ext_key_usage"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Default: []string{}, + Description: `A comma-separated string or list of extended key usages. Valid values can be found at +https://golang.org/pkg/crypto/x509/#ExtKeyUsage +-- simply drop the "ExtKeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list.`, + } + + ret.Fields["ext_key_usage_oids"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated string or list of extended key usage oids.`, + } + + ret.Fields["signature_bits"] = &framework.FieldSchema{ + Type: framework.TypeInt, + Default: 0, + Description: `The number of bits to use in the signature +algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for +SHA-2-512. Defaults to 0 to automatically detect based on key length +(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: 0, + }, + } + + ret.Fields["use_pss"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: `Whether or not to use PSS signatures when using a +RSA key-type issuer. 
Defaults to false.`, + } + return ret } @@ -322,7 +202,50 @@ func (b *backend) pathSign(ctx context.Context, req *logical.Request, data *fram // pathSignVerbatim issues a certificate from a submitted CSR, *not* subject to // role restrictions func (b *backend) pathSignVerbatim(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { - entry := buildSignVerbatimRole(data, role) + entry := &roleEntry{ + AllowLocalhost: true, + AllowAnyName: true, + AllowIPSANs: true, + AllowWildcardCertificates: new(bool), + EnforceHostnames: false, + KeyType: "any", + UseCSRCommonName: true, + UseCSRSANs: true, + AllowedOtherSANs: []string{"*"}, + AllowedSerialNumbers: []string{"*"}, + AllowedURISANs: []string{"*"}, + CNValidations: []string{"disabled"}, + GenerateLease: new(bool), + KeyUsage: data.Get("key_usage").([]string), + ExtKeyUsage: data.Get("ext_key_usage").([]string), + ExtKeyUsageOIDs: data.Get("ext_key_usage_oids").([]string), + SignatureBits: data.Get("signature_bits").(int), + UsePSS: data.Get("use_pss").(bool), + } + *entry.AllowWildcardCertificates = true + + *entry.GenerateLease = false + + if role != nil { + if role.TTL > 0 { + entry.TTL = role.TTL + } + if role.MaxTTL > 0 { + entry.MaxTTL = role.MaxTTL + } + if role.GenerateLease != nil { + *entry.GenerateLease = *role.GenerateLease + } + if role.NotBeforeDuration > 0 { + entry.NotBeforeDuration = role.NotBeforeDuration + } + entry.NoStore = role.NoStore + entry.Issuer = role.Issuer + } + + if len(entry.Issuer) == 0 { + entry.Issuer = defaultRef + } return b.pathIssueSignCert(ctx, req, data, entry, true, true) } @@ -495,7 +418,7 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d if err != nil { return nil, fmt.Errorf("unable to store certificate locally: %w", err) } - b.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) + b.incrementTotalCertificatesCount(certsCounted, key) } if useCSR { diff --git 
a/builtin/logical/pki/path_manage_issuers.go b/builtin/logical/pki/path_manage_issuers.go index 0a4a4bc8cb314..712c80f7d7a92 100644 --- a/builtin/logical/pki/path_manage_issuers.go +++ b/builtin/logical/pki/path_manage_issuers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -9,7 +6,6 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "net/http" "strings" "time" @@ -19,89 +15,20 @@ import ( ) func pathIssuerGenerateRoot(b *backend) *framework.Path { - pattern := "issuers/generate/root/" + framework.GenericNameRegex("exported") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuers, - OperationVerb: "generate", - OperationSuffix: "root", - } - - return buildPathGenerateRoot(b, pattern, displayAttrs) + return buildPathGenerateRoot(b, "issuers/generate/root/"+framework.GenericNameRegex("exported")) } func pathRotateRoot(b *backend) *framework.Path { - pattern := "root/rotate/" + framework.GenericNameRegex("exported") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuers, - OperationVerb: "rotate", - OperationSuffix: "root", - } - - return buildPathGenerateRoot(b, pattern, displayAttrs) + return buildPathGenerateRoot(b, "root/rotate/"+framework.GenericNameRegex("exported")) } -func buildPathGenerateRoot(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathGenerateRoot(b *backend, pattern string) *framework.Path { ret := &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, + Pattern: pattern, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCAGenerateRoot, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "expiration": { - Type: framework.TypeString, - Description: `The expiration of the given.`, - 
Required: true, - }, - "serial_number": { - Type: framework.TypeString, - Description: `The requested Subject's named serial number.`, - Required: true, - }, - "certificate": { - Type: framework.TypeString, - Description: `The generated self-signed CA certificate.`, - Required: true, - }, - "issuing_ca": { - Type: framework.TypeString, - Description: `The issuing certificate authority.`, - Required: true, - }, - "issuer_id": { - Type: framework.TypeString, - Description: `The ID of the issuer`, - Required: true, - }, - "issuer_name": { - Type: framework.TypeString, - Description: `The name of the issuer.`, - Required: true, - }, - "key_id": { - Type: framework.TypeString, - Description: `The ID of the key.`, - Required: true, - }, - "key_name": { - Type: framework.TypeString, - Description: `The key name if given.`, - Required: true, - }, - "private_key": { - Type: framework.TypeString, - Description: `The private key if exported was specified.`, - Required: false, - }, - }, - }}, - }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -119,63 +46,20 @@ func buildPathGenerateRoot(b *backend, pattern string, displayAttrs *framework.D } func pathIssuerGenerateIntermediate(b *backend) *framework.Path { - pattern := "issuers/generate/intermediate/" + framework.GenericNameRegex("exported") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuers, - OperationVerb: "generate", - OperationSuffix: "intermediate", - } - - return buildPathGenerateIntermediate(b, pattern, displayAttrs) + return buildPathGenerateIntermediate(b, + "issuers/generate/intermediate/"+framework.GenericNameRegex("exported")) } func pathCrossSignIntermediate(b *backend) *framework.Path { - pattern := "intermediate/cross-sign" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "cross-sign", - OperationSuffix: "intermediate", - } - - 
return buildPathGenerateIntermediate(b, pattern, displayAttrs) + return buildPathGenerateIntermediate(b, "intermediate/cross-sign") } -func buildPathGenerateIntermediate(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathGenerateIntermediate(b *backend, pattern string) *framework.Path { ret := &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, + Pattern: pattern, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathGenerateIntermediate, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "csr": { - Type: framework.TypeString, - Description: `Certificate signing request.`, - Required: true, - }, - "key_id": { - Type: framework.TypeString, - Description: `Id of the key.`, - Required: true, - }, - "private_key": { - Type: framework.TypeString, - Description: `Generated private key.`, - Required: false, - }, - "private_key_type": { - Type: framework.TypeString, - Description: `Specifies the format used for marshaling the private key.`, - Required: false, - }, - }, - }}, - }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -206,13 +90,6 @@ with Active Directory Certificate Services.`, func pathImportIssuer(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuers/import/(cert|bundle)", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuers, - OperationVerb: "import", - OperationSuffix: "cert|bundle", - }, - Fields: map[string]*framework.FieldSchema{ "pem_bundle": { Type: framework.TypeString, @@ -224,38 +101,6 @@ secret-key (optional) and certificates.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathImportIssuers, - Responses: 
map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "mapping": { - Type: framework.TypeMap, - Description: "A mapping of issuer_id to key_id for all issuers included in this request", - Required: true, - }, - "imported_keys": { - Type: framework.TypeCommaStringSlice, - Description: "Net-new keys imported as a part of this request", - Required: true, - }, - "imported_issuers": { - Type: framework.TypeCommaStringSlice, - Description: "Net-new issuers imported as a part of this request", - Required: true, - }, - "existing_keys": { - Type: framework.TypeCommaStringSlice, - Description: "Existing keys specified as part of the import bundle of this request", - Required: true, - }, - "existing_issuers": { - Type: framework.TypeCommaStringSlice, - Description: "Existing issuers specified as part of the import bundle of this request", - Required: true, - }, - }, - }}, - }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -318,8 +163,6 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d var createdKeys []string var createdIssuers []string - var existingKeys []string - var existingIssuers []string issuerKeyMap := make(map[string]string) // Rather than using certutil.ParsePEMBundle (which restricts the @@ -376,8 +219,6 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d if !existing { createdKeys = append(createdKeys, key.ID.String()) - } else { - existingKeys = append(existingKeys, key.ID.String()) } } @@ -390,8 +231,6 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d issuerKeyMap[cert.ID.String()] = cert.KeyID.String() if !existing { createdIssuers = append(createdIssuers, cert.ID.String()) - } else { - existingIssuers = append(existingIssuers, cert.ID.String()) } } @@ -400,30 +239,25 @@ func (b *backend) pathImportIssuers(ctx 
context.Context, req *logical.Request, d "mapping": issuerKeyMap, "imported_keys": createdKeys, "imported_issuers": createdIssuers, - "existing_keys": existingKeys, - "existing_issuers": existingIssuers, }, } if len(createdIssuers) > 0 { - warnings, err := b.crlBuilder.rebuild(sc, true) + err := b.crlBuilder.rebuild(ctx, b, req, true) if err != nil { // Before returning, check if the error message includes the // string "PSS". If so, it indicates we might've wanted to modify // this issuer, so convert the error to a warning. if strings.Contains(err.Error(), "PSS") || strings.Contains(err.Error(), "pss") { - err = fmt.Errorf("Rebuilding the CRL failed with a message relating to the PSS signature algorithm. This likely means the revocation_signature_algorithm needs to be set on the newly imported issuer(s) because a managed key supports only the PSS algorithm; by default PKCS#1v1.5 was used to build the CRLs. CRLs will not be generated until this has been addressed, however the import was successful. The original error is reproduced below:\n\n\t%w", err) + err = fmt.Errorf("Rebuilding the CRL failed with a message relating to the PSS signature algorithm. This likely means the revocation_signature_algorithm needs to be set on the newly imported issuer(s) because a managed key supports only the PSS algorithm; by default PKCS#1v1.5 was used to build the CRLs. CRLs will not be generated until this has been addressed, however the import was successful. The original error is reproduced below:\n\n\t%v", err) } else { // Note to the caller that while this is an error, we did // successfully import the issuers. - err = fmt.Errorf("Rebuilding the CRL failed. While this is indicative of a problem with the imported issuers (perhaps because of their revocation_signature_algorithm), they did import successfully and are now usable. It is strongly suggested to fix the CRL building errors before continuing. 
The original error is reproduced below:\n\n\t%w", err) + err = fmt.Errorf("Rebuilding the CRL failed. While this is indicative of a problem with the imported issuers (perhaps because of their revocation_signature_algorithm), they did import successfully and are now usable. It is strongly suggested to fix the CRL building errors before continuing. The original error is reproduced below:\n\n\t%v", err) } return nil, err } - for index, warning := range warnings { - response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) - } var issuersWithKeys []string for _, issuer := range createdIssuers { @@ -515,100 +349,11 @@ func pathRevokeIssuer(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/revoke", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "revoke", - OperationSuffix: "issuer", - }, - - Fields: fields, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRevokeIssuer, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "issuer_id": { - Type: framework.TypeString, - Description: `ID of the issuer`, - Required: true, - }, - "issuer_name": { - Type: framework.TypeString, - Description: `Name of the issuer`, - Required: true, - }, - "key_id": { - Type: framework.TypeString, - Description: `ID of the Key`, - Required: true, - }, - "certificate": { - Type: framework.TypeString, - Description: `Certificate`, - Required: true, - }, - "manual_chain": { - Type: framework.TypeCommaStringSlice, - Description: `Manual Chain`, - Required: true, - }, - "ca_chain": { - Type: framework.TypeCommaStringSlice, - Description: `Certificate Authority Chain`, - Required: true, - }, - "leaf_not_after_behavior": { - Type: framework.TypeString, - Description: ``, - 
Required: true, - }, - "usage": { - Type: framework.TypeString, - Description: `Allowed usage`, - Required: true, - }, - "revocation_signature_algorithm": { - Type: framework.TypeString, - Description: `Which signature algorithm to use when building CRLs`, - Required: true, - }, - "revoked": { - Type: framework.TypeBool, - Description: `Whether the issuer was revoked`, - Required: true, - }, - "issuing_certificates": { - Type: framework.TypeCommaStringSlice, - Description: `Specifies the URL values for the Issuing Certificate field`, - Required: true, - }, - "crl_distribution_points": { - Type: framework.TypeStringSlice, - Description: `Specifies the URL values for the CRL Distribution Points field`, - Required: true, - }, - "ocsp_servers": { - Type: framework.TypeStringSlice, - Description: `Specifies the URL values for the OCSP Servers field`, - Required: true, - }, - "revocation_time": { - Type: framework.TypeInt64, - Description: `Time of revocation`, - Required: false, - }, - "revocation_time_rfc3339": { - Type: framework.TypeTime, - Description: `RFC formatted time of revocation`, - Required: false, - }, - }, - }}, - }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -687,18 +432,18 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da // include both in two separate CRLs. Hence, the former is the condition // we check in CRL building, but this step satisfies other guarantees // within Vault. - certEntry, err := fetchCertBySerial(sc, "certs/", issuer.SerialNumber) + certEntry, err := fetchCertBySerial(ctx, b, req, "certs/", issuer.SerialNumber) if err == nil && certEntry != nil { // We've inverted this error check as it doesn't matter; we already // consider this certificate revoked. 
storageCert, err := x509.ParseCertificate(certEntry.Value) if err != nil { - return nil, fmt.Errorf("error parsing stored certificate value: %w", err) + return nil, fmt.Errorf("error parsing stored certificate value: %v", err) } issuerCert, err := issuer.GetCertificate() if err != nil { - return nil, fmt.Errorf("error parsing issuer certificate value: %w", err) + return nil, fmt.Errorf("error parsing issuer certificate value: %v", err) } if bytes.Equal(issuerCert.Raw, storageCert.Raw) { @@ -719,18 +464,18 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da revEntry, err := logical.StorageEntryJSON(revokedPath+normalizeSerial(issuer.SerialNumber), revInfo) if err != nil { - return nil, fmt.Errorf("error creating revocation entry for issuer: %w", err) + return nil, fmt.Errorf("error creating revocation entry for issuer: %v", err) } err = req.Storage.Put(ctx, revEntry) if err != nil { - return nil, fmt.Errorf("error saving revoked issuer to new location: %w", err) + return nil, fmt.Errorf("error saving revoked issuer to new location: %v", err) } } } // Rebuild the CRL to include the newly revoked issuer. - warnings, crlErr := b.crlBuilder.rebuild(sc, false) + crlErr := b.crlBuilder.rebuild(ctx, b, req, false) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -746,9 +491,6 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da // Impossible. return nil, err } - for index, warning := range warnings { - response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) - } // For sanity, we'll add a warning message here if there's no other // issuer which verifies this issuer. diff --git a/builtin/logical/pki/path_manage_keys.go b/builtin/logical/pki/path_manage_keys.go index 3c10c32903d34..90119ce4e8a1e 100644 --- a/builtin/logical/pki/path_manage_keys.go +++ b/builtin/logical/pki/path_manage_keys.go @@ -1,13 +1,9 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( "bytes" "context" "encoding/pem" - "net/http" "strings" "github.com/hashicorp/vault/sdk/framework" @@ -19,12 +15,6 @@ func pathGenerateKey(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/generate/(internal|exported|kms)", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "generate", - OperationSuffix: "internal-key|exported-key|kms-key", - }, - Fields: map[string]*framework.FieldSchema{ keyNameParam: { Type: framework.TypeString, @@ -64,36 +54,7 @@ is required. Ignored for other types.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathGenerateKeyHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "key_id": { - Type: framework.TypeString, - Description: `ID assigned to this key.`, - Required: true, - }, - "key_name": { - Type: framework.TypeString, - Description: `Name assigned to this key.`, - Required: true, - }, - "key_type": { - Type: framework.TypeString, - Description: `The type of key to use; defaults to RSA. 
"rsa" - "ec" and "ed25519" are the only valid values.`, - Required: true, - }, - "private_key": { - Type: framework.TypeString, - Description: `The private key string`, - Required: false, - }, - }, - }}, - }, - + Callback: b.pathGenerateKeyHandler, ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, @@ -188,12 +149,6 @@ func pathImportKey(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/import", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "import", - OperationSuffix: "key", - }, - Fields: map[string]*framework.FieldSchema{ keyNameParam: { Type: framework.TypeString, @@ -207,30 +162,7 @@ func pathImportKey(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathImportKeyHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "key_id": { - Type: framework.TypeString, - Description: `ID assigned to this key.`, - Required: true, - }, - "key_name": { - Type: framework.TypeString, - Description: `Name assigned to this key.`, - Required: true, - }, - "key_type": { - Type: framework.TypeString, - Description: `The type of key to use; defaults to RSA. "rsa" - "ec" and "ed25519" are the only valid values.`, - Required: true, - }, - }, - }}, - }, + Callback: b.pathImportKeyHandler, ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, diff --git a/builtin/logical/pki/path_manage_keys_test.go b/builtin/logical/pki/path_manage_keys_test.go index 3c5708a8bb04d..979f8a203d4ab 100644 --- a/builtin/logical/pki/path_manage_keys_test.go +++ b/builtin/logical/pki/path_manage_keys_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -12,8 +9,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -22,7 +17,7 @@ import ( func TestPKI_PathManageKeys_GenerateInternalKeys(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) tests := []struct { name string @@ -88,7 +83,7 @@ func TestPKI_PathManageKeys_GenerateInternalKeys(t *testing.T) { func TestPKI_PathManageKeys_GenerateExportedKeys(t *testing.T) { t.Parallel() // We tested a lot of the logic above within the internal test, so just make sure we honor the exported contract - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -100,8 +95,6 @@ func TestPKI_PathManageKeys_GenerateExportedKeys(t *testing.T) { }, MountPoint: "pki/", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("keys/generate/exported"), logical.UpdateOperation), resp, true) - require.NoError(t, err, "Failed generating exported key") require.NotNil(t, resp, "Got nil response generating exported key") require.Equal(t, "ec", resp.Data["key_type"], "key_type field contained an invalid type") @@ -122,7 +115,7 @@ func TestPKI_PathManageKeys_GenerateExportedKeys(t *testing.T) { func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) bundle1, err := certutil.CreateKeyBundle("ec", 224, rand.Reader) require.NoError(t, err, "failed generating an ec key bundle") @@ -143,9 +136,6 @@ func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { }, MountPoint: "pki/", }) - - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("keys/import"), logical.UpdateOperation), resp, true) - require.NoError(t, err, 
"Failed importing ec key") require.NotNil(t, resp, "Got nil response importing ec key") require.False(t, resp.IsError(), "received an error response: %v", resp.Error()) @@ -258,7 +248,7 @@ func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { func TestPKI_PathManageKeys_DeleteDefaultKeyWarns(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -286,7 +276,7 @@ func TestPKI_PathManageKeys_DeleteDefaultKeyWarns(t *testing.T) { func TestPKI_PathManageKeys_DeleteUsedKeyFails(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -313,7 +303,7 @@ func TestPKI_PathManageKeys_DeleteUsedKeyFails(t *testing.T) { func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -334,8 +324,6 @@ func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { Data: map[string]interface{}{"key_name": "new-name"}, MountPoint: "pki/", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("key/"+keyId.String()), logical.UpdateOperation), resp, true) - require.NoError(t, err, "failed updating key with new name") require.NotNil(t, resp, "Got nil response updating key with new name") require.False(t, resp.IsError(), "unexpected error updating key with new name: %#v", resp.Error()) @@ -346,8 +334,6 @@ func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { Storage: s, MountPoint: "pki/", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("key/"+keyId.String()), logical.ReadOperation), resp, true) - require.NoError(t, err, "failed reading key 
after name update") require.NotNil(t, resp, "Got nil response reading key after name update") require.False(t, resp.IsError(), "unexpected error reading key: %#v", resp.Error()) @@ -370,7 +356,7 @@ func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { func TestPKI_PathManageKeys_ImportKeyBundleBadData(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -404,7 +390,7 @@ func TestPKI_PathManageKeys_ImportKeyBundleBadData(t *testing.T) { func TestPKI_PathManageKeys_ImportKeyRejectsMultipleKeys(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) bundle1, err := certutil.CreateKeyBundle("ec", 224, rand.Reader) require.NoError(t, err, "failed generating an ec key bundle") diff --git a/builtin/logical/pki/path_ocsp.go b/builtin/logical/pki/path_ocsp.go index b9f5cd1f9fd98..4ee6d38f68b3c 100644 --- a/builtin/logical/pki/path_ocsp.go +++ b/builtin/logical/pki/path_ocsp.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -16,7 +13,6 @@ import ( "io" "math/big" "net/http" - "strings" "time" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -71,33 +67,8 @@ var ( ) func buildPathOcspGet(b *backend) *framework.Path { - pattern := "ocsp/" + framework.MatchAllRegex(ocspReqParam) - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "query", - OperationSuffix: "ocsp-with-get-req", - } - - return buildOcspGetWithPath(b, pattern, displayAttrs) -} - -func buildPathUnifiedOcspGet(b *backend) *framework.Path { - pattern := "unified-ocsp/" + framework.MatchAllRegex(ocspReqParam) - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "query", - OperationSuffix: "unified-ocsp-with-get-req", - } - - return buildOcspGetWithPath(b, pattern, displayAttrs) -} - -func buildOcspGetWithPath(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { return &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, + Pattern: "ocsp/" + framework.MatchAllRegex(ocspReqParam), Fields: map[string]*framework.FieldSchema{ ocspReqParam: { Type: framework.TypeString, @@ -116,33 +87,8 @@ func buildOcspGetWithPath(b *backend, pattern string, displayAttrs *framework.Di } func buildPathOcspPost(b *backend) *framework.Path { - pattern := "ocsp" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "query", - OperationSuffix: "ocsp", - } - - return buildOcspPostWithPath(b, pattern, displayAttrs) -} - -func buildPathUnifiedOcspPost(b *backend) *framework.Path { - pattern := "unified-ocsp" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "query", - OperationSuffix: "unified-ocsp", - } - - return buildOcspPostWithPath(b, pattern, displayAttrs) -} - -func buildOcspPostWithPath(b *backend, pattern string, 
displayAttrs *framework.DisplayAttributes) *framework.Path { return &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, + Pattern: "ocsp", Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.ocspHandler, @@ -157,7 +103,7 @@ func buildOcspPostWithPath(b *backend, pattern string, displayAttrs *framework.D func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { sc := b.makeStorageContext(ctx, request.Storage) cfg, err := b.crlBuilder.getConfigWithUpdate(sc) - if err != nil || cfg.OcspDisable || (isUnifiedOcspPath(request) && !cfg.UnifiedCRL) { + if err != nil || cfg.OcspDisable { return OcspUnauthorizedResponse, nil } @@ -171,9 +117,7 @@ func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, dat return OcspMalformedResponse, nil } - useUnifiedStorage := canUseUnifiedStorage(request, cfg) - - ocspStatus, err := getOcspStatus(sc, ocspReq, useUnifiedStorage) + ocspStatus, err := getOcspStatus(sc, request, ocspReq) if err != nil { return logAndReturnInternalError(b, err), nil } @@ -210,20 +154,6 @@ func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, dat }, nil } -func canUseUnifiedStorage(req *logical.Request, cfg *crlConfig) bool { - if isUnifiedOcspPath(req) { - return true - } - - // We are operating on the existing /pki/ocsp path, both of these fields need to be enabled - // for us to use the unified path. - return shouldLocalPathsUseUnified(cfg) -} - -func isUnifiedOcspPath(req *logical.Request) bool { - return strings.HasPrefix(req.Path, "unified-ocsp") -} - func generateUnknownResponse(cfg *crlConfig, sc *storageContext, ocspReq *ocsp.Request) *logical.Response { // Generate an Unknown OCSP response, signing with the default issuer from the mount as we did // not match the request's issuer. 
If no default issuer can be used, return with Unauthorized as there @@ -322,8 +252,8 @@ func logAndReturnInternalError(b *backend, err error) *logical.Response { return OcspInternalErrorResponse } -func getOcspStatus(sc *storageContext, ocspReq *ocsp.Request, useUnifiedStorage bool) (*ocspRespInfo, error) { - revEntryRaw, err := fetchCertBySerialBigInt(sc, revokedPath, ocspReq.SerialNumber) +func getOcspStatus(sc *storageContext, request *logical.Request, ocspReq *ocsp.Request) (*ocspRespInfo, error) { + revEntryRaw, err := fetchCertBySerialBigInt(sc.Context, sc.Backend, request, revokedPath, ocspReq.SerialNumber) if err != nil { return nil, err } @@ -342,18 +272,6 @@ func getOcspStatus(sc *storageContext, ocspReq *ocsp.Request, useUnifiedStorage info.ocspStatus = ocsp.Revoked info.revocationTimeUTC = &revEntry.RevocationTimeUTC info.issuerID = revEntry.CertificateIssuer // This might be empty if the CRL hasn't been rebuilt - } else if useUnifiedStorage { - dashSerial := normalizeSerialFromBigInt(ocspReq.SerialNumber) - unifiedEntry, err := getUnifiedRevocationBySerial(sc, dashSerial) - if err != nil { - return nil, err - } - - if unifiedEntry != nil { - info.ocspStatus = ocsp.Revoked - info.revocationTimeUTC = &unifiedEntry.RevocationTimeUTC - info.issuerID = unifiedEntry.CertificateIssuer - } } return &info, nil diff --git a/builtin/logical/pki/path_ocsp_test.go b/builtin/logical/pki/path_ocsp_test.go index ab1173fa34293..6504b3d3ffb26 100644 --- a/builtin/logical/pki/path_ocsp_test.go +++ b/builtin/logical/pki/path_ocsp_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -17,8 +14,6 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/vault" @@ -48,8 +43,8 @@ func TestOcsp_Disabled(t *testing.T) { resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ "ocsp_disable": "true", }) - requireSuccessNonNilResponse(t, resp, err) - resp, err = SendOcspRequest(t, b, s, localTT.reqType, testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + requireSuccessNilResponse(t, resp, err) + resp, err = sendOcspRequest(t, b, s, localTT.reqType, testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) require.NoError(t, err) requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 401, resp.Data["http_status_code"]) @@ -69,9 +64,9 @@ func TestOcsp_UnknownIssuerWithNoDefault(t *testing.T) { _, _, testEnv := setupOcspEnv(t, "ec") // Create another completely empty mount so the created issuer/certificate above is unknown - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) - resp, err := SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + resp, err := sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) require.NoError(t, err) requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 401, resp.Data["http_status_code"]) @@ -93,7 +88,7 @@ func TestOcsp_WrongIssuerInRequest(t *testing.T) { }) requireSuccessNonNilResponse(t, resp, err, "revoke") - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer2, crypto.SHA1) + resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer2, crypto.SHA1) require.NoError(t, err) requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 200, 
resp.Data["http_status_code"]) @@ -175,7 +170,7 @@ func TestOcsp_InvalidIssuerIdInRevocationEntry(t *testing.T) { require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) // Send the request - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) require.NoError(t, err) requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 200, resp.Data["http_status_code"]) @@ -202,7 +197,6 @@ func TestOcsp_UnknownIssuerIdWithDefaultHavingOcspUsageRemoved(t *testing.T) { resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ "serial_number": serial, }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("revoke"), logical.UpdateOperation), resp, true) requireSuccessNonNilResponse(t, resp, err, "revoke") // Twiddle the entry so that the issuer id is no longer valid. 
@@ -229,7 +223,7 @@ func TestOcsp_UnknownIssuerIdWithDefaultHavingOcspUsageRemoved(t *testing.T) { requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer2") // Send the request - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) require.NoError(t, err) requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 401, resp.Data["http_status_code"]) @@ -266,7 +260,7 @@ func TestOcsp_RevokedCertHasIssuerWithoutOcspUsage(t *testing.T) { require.False(t, usages.HasUsage(OCSPSigningUsage)) // Request an OCSP request from it, we should get an Unauthorized response back - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) requireSuccessNonNilResponse(t, resp, err, "ocsp get request") requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 401, resp.Data["http_status_code"]) @@ -305,7 +299,7 @@ func TestOcsp_RevokedCertHasIssuerWithoutAKey(t *testing.T) { requireSuccessNonNilResponse(t, resp, err, "failed deleting key") // Request an OCSP request from it, we should get an Unauthorized response back - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) requireSuccessNonNilResponse(t, resp, err, "ocsp get request") requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 401, resp.Data["http_status_code"]) @@ -351,7 +345,7 @@ func TestOcsp_MultipleMatchingIssuersOneWithoutSigningUsage(t *testing.T) { require.False(t, usages.HasUsage(OCSPSigningUsage)) // Request an 
OCSP request from it, we should get a Good response back, from the rotated cert - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) requireSuccessNonNilResponse(t, resp, err, "ocsp get request") requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 200, resp.Data["http_status_code"]) @@ -517,7 +511,7 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe b, s, testEnv := setupOcspEnvWithCaKeyConfig(t, caKeyType, caKeyBits, caKeySigBits) // Non-revoked cert - resp, err := SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) + resp, err := sendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) requireSuccessNonNilResponse(t, resp, err, "ocsp get request") requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 200, resp.Data["http_status_code"]) @@ -541,7 +535,7 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe }) requireSuccessNonNilResponse(t, resp, err, "revoke") - resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) + resp, err = sendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) requireSuccessNonNilResponse(t, resp, err, "ocsp get request with revoked") requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 200, resp.Data["http_status_code"]) @@ -560,7 +554,7 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1) // Request status for our second issuer - resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer2, testEnv.issuer2, 
requestHash) + resp, err = sendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer2, testEnv.issuer2, requestHash) requireSuccessNonNilResponse(t, resp, err, "ocsp get request") requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") require.Equal(t, 200, resp.Data["http_status_code"]) @@ -592,8 +586,6 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe } func requireOcspSignatureAlgoForKey(t *testing.T, expected x509.SignatureAlgorithm, actual x509.SignatureAlgorithm) { - t.Helper() - require.Equal(t, expected.String(), actual.String()) } @@ -616,7 +608,7 @@ func setupOcspEnv(t *testing.T, keyType string) (*backend, logical.Storage, *ocs } func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, caKeySigBits int) (*backend, logical.Storage, *ocspTestEnv) { - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) var issuerCerts []*x509.Certificate var leafCerts []*x509.Certificate var issuerIds []issuerID @@ -644,7 +636,7 @@ func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, ca "issuer_ref": issuerId, "key_type": keyType, }) - requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) + requireSuccessNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) resp, err = CBWrite(b, s, "issue/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ "common_name": "test.foobar.com", @@ -675,9 +667,7 @@ func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, ca return b, s, testEnv } -func SendOcspRequest(t *testing.T, b *backend, s logical.Storage, getOrPost string, cert, issuer *x509.Certificate, requestHash crypto.Hash) (*logical.Response, error) { - t.Helper() - +func sendOcspRequest(t *testing.T, b *backend, s logical.Storage, getOrPost string, cert, issuer *x509.Certificate, requestHash crypto.Hash) (*logical.Response, error) { ocspRequest := 
generateRequest(t, requestHash, cert, issuer) switch strings.ToLower(getOrPost) { @@ -686,7 +676,7 @@ func SendOcspRequest(t *testing.T, b *backend, s logical.Storage, getOrPost stri case "post": return sendOcspPostRequest(b, s, ocspRequest) default: - t.Fatalf("unsupported value for SendOcspRequest getOrPost arg: %s", getOrPost) + t.Fatalf("unsupported value for sendOcspRequest getOrPost arg: %s", getOrPost) } return nil, nil } @@ -712,8 +702,6 @@ func sendOcspPostRequest(b *backend, s logical.Storage, ocspRequest []byte) (*lo } func generateRequest(t *testing.T, requestHash crypto.Hash, cert *x509.Certificate, issuer *x509.Certificate) []byte { - t.Helper() - opts := &ocsp.RequestOptions{Hash: requestHash} ocspRequestDer, err := ocsp.CreateRequest(cert, issuer, opts) require.NoError(t, err, "Failed generating OCSP request") @@ -721,8 +709,6 @@ func generateRequest(t *testing.T, requestHash crypto.Hash, cert *x509.Certifica } func requireOcspResponseSignedBy(t *testing.T, ocspResp *ocsp.Response, issuer *x509.Certificate) { - t.Helper() - err := ocspResp.CheckSignatureFrom(issuer) require.NoError(t, err, "Failed signature verification of ocsp response: %w", err) } diff --git a/builtin/logical/pki/path_resign_crls.go b/builtin/logical/pki/path_resign_crls.go deleted file mode 100644 index a82f94f32fbcd..0000000000000 --- a/builtin/logical/pki/path_resign_crls.go +++ /dev/null @@ -1,675 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "context" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net/http" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - crlNumberParam = "crl_number" - deltaCrlBaseNumberParam = "delta_crl_base_number" - nextUpdateParam = "next_update" - crlsParam = "crls" - formatParam = "format" -) - -var ( - akOid = asn1.ObjectIdentifier{2, 5, 29, 35} - crlNumOid = asn1.ObjectIdentifier{2, 5, 29, 20} - deltaCrlOid = asn1.ObjectIdentifier{2, 5, 29, 27} -) - -func pathResignCrls(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/resign-crls", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationVerb: "resign", - OperationSuffix: "crls", - }, - - Fields: map[string]*framework.FieldSchema{ - issuerRefParam: { - Type: framework.TypeString, - Description: `Reference to a existing issuer; either "default" -for the configured default issuer, an identifier or the name assigned -to the issuer.`, - Default: defaultRef, - }, - crlNumberParam: { - Type: framework.TypeInt, - Description: `The sequence number to be written within the CRL Number extension.`, - }, - deltaCrlBaseNumberParam: { - Type: framework.TypeInt, - Description: `Using a zero or greater value specifies the base CRL revision number to encode within - a Delta CRL indicator extension, otherwise the extension will not be added.`, - Default: -1, - }, - nextUpdateParam: { - Type: framework.TypeString, - Description: `The amount of time the generated CRL should be -valid; defaults to 72 hours.`, - Default: defaultCrlConfig.Expiry, - }, - 
crlsParam: { - Type: framework.TypeStringSlice, - Description: `A list of PEM encoded CRLs to combine, originally signed by the requested issuer.`, - }, - formatParam: { - Type: framework.TypeString, - Description: `The format of the combined CRL, can be "pem" or "der". If "der", the value will be -base64 encoded. Defaults to "pem".`, - Default: "pem", - }, - }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathUpdateResignCrlsHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "crl": { - Type: framework.TypeString, - Description: `CRL`, - Required: true, - }, - }, - }}, - }, - }, - }, - - HelpSynopsis: `Combine and sign with the provided issuer different CRLs`, - HelpDescription: `Provide two or more PEM encoded CRLs signed by the issuer, - normally from separate Vault clusters to be combined and signed.`, - } -} - -func pathSignRevocationList(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-revocation-list", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationVerb: "sign", - OperationSuffix: "revocation-list", - }, - - Fields: map[string]*framework.FieldSchema{ - issuerRefParam: { - Type: framework.TypeString, - Description: `Reference to a existing issuer; either "default" -for the configured default issuer, an identifier or the name assigned -to the issuer.`, - Default: defaultRef, - }, - crlNumberParam: { - Type: framework.TypeInt, - Description: `The sequence number to be written within the CRL Number extension.`, - }, - deltaCrlBaseNumberParam: { - Type: framework.TypeInt, - Description: `Using a zero or greater value specifies the base CRL revision number to encode within - a Delta CRL indicator extension, otherwise the extension will not be added.`, - 
Default: -1, - }, - nextUpdateParam: { - Type: framework.TypeString, - Description: `The amount of time the generated CRL should be -valid; defaults to 72 hours.`, - Default: defaultCrlConfig.Expiry, - }, - formatParam: { - Type: framework.TypeString, - Description: `The format of the combined CRL, can be "pem" or "der". If "der", the value will be -base64 encoded. Defaults to "pem".`, - Default: "pem", - }, - "revoked_certs": { - Type: framework.TypeSlice, - Description: `A list of maps containing the keys serial_number (string), revocation_time (string), -and extensions (map with keys id (string), critical (bool), value (string))`, - }, - "extensions": { - Type: framework.TypeSlice, - Description: `A list of maps containing extensions with keys id (string), critical (bool), -value (string)`, - }, - }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathUpdateSignRevocationListHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "crl": { - Type: framework.TypeString, - Description: `CRL`, - Required: true, - }, - }, - }}, - }, - }, - }, - - HelpSynopsis: `Generate and sign a CRL based on the provided parameters.`, - HelpDescription: `Given a list of revoked certificates and other parameters, -return a signed CRL based on the parameter values.`, - } -} - -func (b *backend) pathUpdateResignCrlsHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { - return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil - } - - issuerRef := getIssuerRef(data) - crlNumber := data.Get(crlNumberParam).(int) - deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) - nextUpdateStr := data.Get(nextUpdateParam).(string) - rawCrls := data.Get(crlsParam).([]string) - - format, err := 
parseCrlFormat(data.Get(formatParam).(string)) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - nextUpdateOffset, err := parseutil.ParseDurationSecond(nextUpdateStr) - if err != nil { - return logical.ErrorResponse("invalid value for %s: %v", nextUpdateParam, err), nil - } - - if nextUpdateOffset <= 0 { - return logical.ErrorResponse("%s parameter must be greater than 0", nextUpdateParam), nil - } - - if crlNumber < 0 { - return logical.ErrorResponse("%s parameter must be 0 or greater", crlNumberParam), nil - } - if deltaCrlBaseNumber < -1 { - return logical.ErrorResponse("%s parameter must be -1 or greater", deltaCrlBaseNumberParam), nil - } - - if issuerRef == "" { - return logical.ErrorResponse("%s parameter cannot be blank", issuerRefParam), nil - } - - providedCrls, err := decodePemCrls(rawCrls) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - sc := b.makeStorageContext(ctx, request.Storage) - caBundle, err := getCaBundle(sc, issuerRef) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - if err := verifyCrlsAreFromIssuersKey(caBundle.Certificate, providedCrls); err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - revokedCerts, warnings, err := getAllRevokedCertsFromPem(providedCrls) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - now := time.Now() - template := &x509.RevocationList{ - SignatureAlgorithm: caBundle.RevocationSigAlg, - RevokedCertificates: revokedCerts, - Number: big.NewInt(int64(crlNumber)), - ThisUpdate: now, - NextUpdate: now.Add(nextUpdateOffset), - } - - if deltaCrlBaseNumber > -1 { - ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) - if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) - } - template.ExtraExtensions = []pkix.Extension{ext} - } - - crlBytes, err := x509.CreateRevocationList(rand.Reader, template, caBundle.Certificate, 
caBundle.PrivateKey) - if err != nil { - return nil, fmt.Errorf("error creating new CRL: %w", err) - } - - body := encodeResponse(crlBytes, format == "der") - - return &logical.Response{ - Warnings: warnings, - Data: map[string]interface{}{ - "crl": body, - }, - }, nil -} - -func (b *backend) pathUpdateSignRevocationListHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { - return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil - } - - issuerRef := getIssuerRef(data) - crlNumber := data.Get(crlNumberParam).(int) - deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) - nextUpdateStr := data.Get(nextUpdateParam).(string) - nextUpdateOffset, err := parseutil.ParseDurationSecond(nextUpdateStr) - if err != nil { - return logical.ErrorResponse("invalid value for %s: %v", nextUpdateParam, err), nil - } - - if nextUpdateOffset <= 0 { - return logical.ErrorResponse("%s parameter must be greater than 0", nextUpdateParam), nil - } - - if crlNumber < 0 { - return logical.ErrorResponse("%s parameter must be 0 or greater", crlNumberParam), nil - } - if deltaCrlBaseNumber < -1 { - return logical.ErrorResponse("%s parameter must be -1 or greater", deltaCrlBaseNumberParam), nil - } - - if issuerRef == "" { - return logical.ErrorResponse("%s parameter cannot be blank", issuerRefParam), nil - } - - format, err := parseCrlFormat(data.Get(formatParam).(string)) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - revokedCerts, err := parseRevokedCertsParam(data.Get("revoked_certs").([]interface{})) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - crlExtensions, err := parseExtensionsParam(data.Get("extensions").([]interface{})) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - sc := b.makeStorageContext(ctx, request.Storage) - caBundle, err := getCaBundle(sc, issuerRef) - 
if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - if deltaCrlBaseNumber > -1 { - ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) - if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) - } - crlExtensions = append(crlExtensions, ext) - } - - now := time.Now() - template := &x509.RevocationList{ - SignatureAlgorithm: caBundle.RevocationSigAlg, - RevokedCertificates: revokedCerts, - Number: big.NewInt(int64(crlNumber)), - ThisUpdate: now, - NextUpdate: now.Add(nextUpdateOffset), - ExtraExtensions: crlExtensions, - } - - crlBytes, err := x509.CreateRevocationList(rand.Reader, template, caBundle.Certificate, caBundle.PrivateKey) - if err != nil { - return nil, fmt.Errorf("error creating new CRL: %w", err) - } - - body := encodeResponse(crlBytes, format == "der") - - return &logical.Response{ - Data: map[string]interface{}{ - "crl": body, - }, - }, nil -} - -func parseRevokedCertsParam(revokedCerts []interface{}) ([]pkix.RevokedCertificate, error) { - var parsedCerts []pkix.RevokedCertificate - seenSerials := make(map[*big.Int]int) - for i, entry := range revokedCerts { - if revokedCert, ok := entry.(map[string]interface{}); ok { - serialNum, err := parseSerialNum(revokedCert) - if err != nil { - return nil, fmt.Errorf("failed parsing serial_number from entry %d: %w", i, err) - } - - if origEntry, exists := seenSerials[serialNum]; exists { - serialNumStr := revokedCert["serial_number"] - return nil, fmt.Errorf("duplicate serial number: %s, original entry %d and %d", serialNumStr, origEntry, i) - } - - seenSerials[serialNum] = i - - revocationTime, err := parseRevocationTime(revokedCert) - if err != nil { - return nil, fmt.Errorf("failed parsing revocation_time from entry %d: %w", i, err) - } - - extensions, err := parseCertExtensions(revokedCert) - if err != nil { - return nil, fmt.Errorf("failed parsing extensions from entry %d: %w", i, err) - } - - parsedCerts = 
append(parsedCerts, pkix.RevokedCertificate{ - SerialNumber: serialNum, - RevocationTime: revocationTime, - Extensions: extensions, - }) - } - } - - return parsedCerts, nil -} - -func parseCertExtensions(cert map[string]interface{}) ([]pkix.Extension, error) { - extRaw, exists := cert["extensions"] - if !exists || extRaw == nil || extRaw == "" { - // We don't require extensions to be populated - return []pkix.Extension{}, nil - } - - extListRaw, ok := extRaw.([]interface{}) - if !ok { - return nil, errors.New("'extensions' field did not contain a slice") - } - - return parseExtensionsParam(extListRaw) -} - -func parseExtensionsParam(extRawList []interface{}) ([]pkix.Extension, error) { - var extensions []pkix.Extension - seenOid := make(map[string]struct{}) - for i, entryRaw := range extRawList { - entry, ok := entryRaw.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("extension entry %d not a map", i) - } - extension, err := parseExtension(entry) - if err != nil { - return nil, fmt.Errorf("failed parsing extension entry %d: %w", i, err) - } - - parsedIdStr := extension.Id.String() - if _, exists := seenOid[parsedIdStr]; exists { - return nil, fmt.Errorf("duplicate extension id: %s", parsedIdStr) - } - - seenOid[parsedIdStr] = struct{}{} - - extensions = append(extensions, extension) - } - - return extensions, nil -} - -func parseExtension(entry map[string]interface{}) (pkix.Extension, error) { - asnObjectId, err := parseExtAsn1ObjectId(entry) - if err != nil { - return pkix.Extension{}, err - } - - if asnObjectId.Equal(akOid) { - return pkix.Extension{}, fmt.Errorf("authority key object identifier (%s) is reserved", akOid.String()) - } - - if asnObjectId.Equal(crlNumOid) { - return pkix.Extension{}, fmt.Errorf("crl number object identifier (%s) is reserved", crlNumOid.String()) - } - - if asnObjectId.Equal(deltaCrlOid) { - return pkix.Extension{}, fmt.Errorf("delta crl object identifier (%s) is reserved", deltaCrlOid.String()) - } - - critical, err := 
parseExtCritical(entry) - if err != nil { - return pkix.Extension{}, err - } - - extVal, err := parseExtValue(entry) - if err != nil { - return pkix.Extension{}, err - } - - return pkix.Extension{ - Id: asnObjectId, - Critical: critical, - Value: extVal, - }, nil -} - -func parseExtValue(entry map[string]interface{}) ([]byte, error) { - valRaw, exists := entry["value"] - if !exists { - return nil, errors.New("missing 'value' field") - } - - valStr, err := parseutil.ParseString(valRaw) - if err != nil { - return nil, fmt.Errorf("'value' field value was not a string: %w", err) - } - - if len(valStr) == 0 { - return []byte{}, nil - } - - decodeString, err := base64.StdEncoding.DecodeString(valStr) - if err != nil { - return nil, fmt.Errorf("failed base64 decoding 'value' field: %w", err) - } - return decodeString, nil -} - -func parseExtCritical(entry map[string]interface{}) (bool, error) { - critRaw, exists := entry["critical"] - if !exists || critRaw == nil || critRaw == "" { - // Optional field, so just return as if they provided the value false. 
- return false, nil - } - - myBool, err := parseutil.ParseBool(critRaw) - if err != nil { - return false, fmt.Errorf("critical field value failed to be parsed: %w", err) - } - - return myBool, nil -} - -func parseExtAsn1ObjectId(entry map[string]interface{}) (asn1.ObjectIdentifier, error) { - idRaw, idExists := entry["id"] - if !idExists { - return asn1.ObjectIdentifier{}, errors.New("missing id field") - } - - oidStr, err := parseutil.ParseString(idRaw) - if err != nil { - return nil, fmt.Errorf("'id' field value was not a string: %w", err) - } - - if len(oidStr) == 0 { - return asn1.ObjectIdentifier{}, errors.New("zero length object identifier") - } - - // Parse out dot notation - oidParts := strings.Split(oidStr, ".") - oid := make(asn1.ObjectIdentifier, len(oidParts), len(oidParts)) - for i := range oidParts { - oidIntVal, err := strconv.Atoi(oidParts[i]) - if err != nil { - return nil, fmt.Errorf("failed parsing asn1 index element %d value %s: %w", i, oidParts[i], err) - } - oid[i] = oidIntVal - } - return oid, nil -} - -func parseRevocationTime(cert map[string]interface{}) (time.Time, error) { - var revTime time.Time - revTimeRaw, exists := cert["revocation_time"] - if !exists { - return revTime, errors.New("missing 'revocation_time' field") - } - revTime, err := parseutil.ParseAbsoluteTime(revTimeRaw) - if err != nil { - return revTime, fmt.Errorf("failed parsing time %v: %w", revTimeRaw, err) - } - return revTime, nil -} - -func parseSerialNum(cert map[string]interface{}) (*big.Int, error) { - serialNumRaw, serialExists := cert["serial_number"] - if !serialExists { - return nil, errors.New("missing 'serial_number' field") - } - serialNumStr, err := parseutil.ParseString(serialNumRaw) - if err != nil { - return nil, fmt.Errorf("'serial_number' field value was not a string: %w", err) - } - // Clean up any provided serials to decoder - for _, separator := range []string{":", ".", "-", " "} { - serialNumStr = strings.ReplaceAll(serialNumStr, separator, "") - } 
- // Prefer hex.DecodeString over certutil.ParseHexFormatted as we don't need a separator - serialBytes, err := hex.DecodeString(serialNumStr) - if err != nil { - return nil, fmt.Errorf("'serial_number' failed converting to bytes: %w", err) - } - - bigIntSerial := big.Int{} - bigIntSerial.SetBytes(serialBytes) - return &bigIntSerial, nil -} - -func parseCrlFormat(requestedValue string) (string, error) { - format := strings.ToLower(requestedValue) - switch format { - case "pem", "der": - return format, nil - default: - return "", fmt.Errorf("unknown format value of %s", requestedValue) - } -} - -func verifyCrlsAreFromIssuersKey(caCert *x509.Certificate, crls []*x509.RevocationList) error { - for i, crl := range crls { - // At this point we assume if the issuer's key signed the CRL that is a good enough check - // to validate that we owned/generated the provided CRL. - if err := crl.CheckSignatureFrom(caCert); err != nil { - return fmt.Errorf("CRL index: %d was not signed by requested issuer", i) - } - } - - return nil -} - -func encodeResponse(crlBytes []byte, derFormatRequested bool) string { - if derFormatRequested { - return base64.StdEncoding.EncodeToString(crlBytes) - } - - block := pem.Block{ - Type: "X509 CRL", - Bytes: crlBytes, - } - return string(pem.EncodeToMemory(&block)) -} - -func getAllRevokedCertsFromPem(crls []*x509.RevocationList) ([]pkix.RevokedCertificate, []string, error) { - uniqueCert := map[string]pkix.RevokedCertificate{} - var warnings []string - for _, crl := range crls { - for _, curCert := range crl.RevokedCertificates { - serial := serialFromBigInt(curCert.SerialNumber) - // Get rid of any extensions the existing certificate might have had. 
- curCert.Extensions = []pkix.Extension{} - - existingCert, exists := uniqueCert[serial] - if !exists { - // First time we see the revoked cert - uniqueCert[serial] = curCert - continue - } - - if existingCert.RevocationTime.Equal(curCert.RevocationTime) { - // Same revocation times, just skip it - continue - } - - warn := fmt.Sprintf("Duplicate serial %s with different revocation "+ - "times detected, using oldest revocation time", serial) - warnings = append(warnings, warn) - - if existingCert.RevocationTime.After(curCert.RevocationTime) { - uniqueCert[serial] = curCert - } - } - } - - var revokedCerts []pkix.RevokedCertificate - for _, cert := range uniqueCert { - revokedCerts = append(revokedCerts, cert) - } - - return revokedCerts, warnings, nil -} - -func getCaBundle(sc *storageContext, issuerRef string) (*certutil.CAInfoBundle, error) { - issuerId, err := sc.resolveIssuerReference(issuerRef) - if err != nil { - return nil, fmt.Errorf("failed to resolve issuer %s: %w", issuerRefParam, err) - } - - return sc.fetchCAInfoByIssuerId(issuerId, CRLSigningUsage) -} - -func decodePemCrls(rawCrls []string) ([]*x509.RevocationList, error) { - var crls []*x509.RevocationList - for i, rawCrl := range rawCrls { - crl, err := decodePemCrl(rawCrl) - if err != nil { - return nil, fmt.Errorf("failed decoding crl %d: %w", i, err) - } - crls = append(crls, crl) - } - - return crls, nil -} - -func decodePemCrl(crl string) (*x509.RevocationList, error) { - block, rest := pem.Decode([]byte(crl)) - if len(rest) != 0 { - return nil, errors.New("invalid crl; should be one PEM block only") - } - - return x509.ParseRevocationList(block.Bytes) -} diff --git a/builtin/logical/pki/path_resign_crls_test.go b/builtin/logical/pki/path_resign_crls_test.go deleted file mode 100644 index f1ee1152c041a..0000000000000 --- a/builtin/logical/pki/path_resign_crls_test.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "fmt" - "math/big" - "testing" - "time" - - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - - "github.com/hashicorp/vault/api" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/vault" - - "github.com/hashicorp/vault/sdk/logical" - "github.com/stretchr/testify/require" -) - -func TestResignCrls_ForbidSigningOtherIssuerCRL(t *testing.T) { - t.Parallel() - - // Some random CRL from another issuer - pem1 := "-----BEGIN X509 CRL-----\nMIIBvjCBpwIBATANBgkqhkiG9w0BAQsFADAbMRkwFwYDVQQDExByb290LWV4YW1w\nbGUuY29tFw0yMjEwMjYyMTI5MzlaFw0yMjEwMjkyMTI5MzlaMCcwJQIUSnVf8wsd\nHjOt9drCYFhWxS9QqGoXDTIyMTAyNjIxMjkzOVqgLzAtMB8GA1UdIwQYMBaAFHki\nZ0XDUQVSajNRGXrg66OaIFlYMAoGA1UdFAQDAgEDMA0GCSqGSIb3DQEBCwUAA4IB\nAQBGIdtqTwemnLZF5AoP+jzvKZ26S3y7qvRIzd7f4A0EawzYmWXSXfwqo4TQ4DG3\nnvT+AaA1zCCOlH/1U+ufN9gSSN0j9ax58brSYMnMskMCqhLKIp0qnvS4jr/gopmF\nv8grbvLHEqNYTu1T7umMLdNQUsWT3Qc+EIjfoKj8xD2FHsZwJ+EMbytwl8Unipjr\nhz4rmcES/65vavfdFpOI6YXfi+UAaHBdkTqmHgg4BdpuXfYtlf+iotFSOkygD5fl\n0D+RVFW9uJv2WfbQ7kRt1X/VcFk/onw0AQqxZRVUzvjoMw+EMcxSq3UKOlXcWDxm\nEFz9rFQQ66L388EP8RD7Dh3X\n-----END X509 CRL-----" - - b, s := CreateBackendWithStorage(t) - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "test.com", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err) - - resp, err = CBWrite(b, s, "issuer/default/resign-crls", map[string]interface{}{ - "crl_number": "2", - "next_update": "1h", - "format": "pem", - "crls": []string{pem1}, - }) - require.ErrorContains(t, err, "was not signed by requested issuer") -} - -func TestResignCrls_NormalCrl(t *testing.T) { - t.Parallel() - b1, s1 := CreateBackendWithStorage(t) - b2, s2 := CreateBackendWithStorage(t) - - // Setup two backends, with the same key material/certificate with a different leaf in each that is revoked. 
- caCert, serial1, serial2, crl1, crl2 := setupResignCrlMounts(t, b1, s1, b2, s2) - - // Attempt to combine the CRLs - resp, err := CBWrite(b1, s1, "issuer/default/resign-crls", map[string]interface{}{ - "crl_number": "2", - "next_update": "1h", - "format": "pem", - "crls": []string{crl1, crl2}, - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b1.Route("issuer/default/resign-crls"), logical.UpdateOperation), resp, true) - requireSuccessNonNilResponse(t, resp, err) - requireFieldsSetInResp(t, resp, "crl") - pemCrl := resp.Data["crl"].(string) - combinedCrl, err := decodePemCrl(pemCrl) - require.NoError(t, err, "failed decoding combined CRL") - serials := extractSerialsFromCrl(t, combinedCrl) - - require.Contains(t, serials, serial1) - require.Contains(t, serials, serial2) - require.Equal(t, 2, len(serials), "serials contained more serials than expected") - - require.Equal(t, big.NewInt(int64(2)), combinedCrl.Number) - require.Equal(t, combinedCrl.ThisUpdate.Add(1*time.Hour), combinedCrl.NextUpdate) - - extensions := combinedCrl.Extensions - requireExtensionOid(t, []int{2, 5, 29, 20}, extensions) // CRL Number Extension - requireExtensionOid(t, []int{2, 5, 29, 35}, extensions) // akidOid - require.Equal(t, 2, len(extensions)) - - err = combinedCrl.CheckSignatureFrom(caCert) - require.NoError(t, err, "failed signature check of CRL") -} - -func TestResignCrls_EliminateDuplicates(t *testing.T) { - t.Parallel() - b1, s1 := CreateBackendWithStorage(t) - b2, s2 := CreateBackendWithStorage(t) - - // Setup two backends, with the same key material/certificate with a different leaf in each that is revoked. 
- _, serial1, _, crl1, _ := setupResignCrlMounts(t, b1, s1, b2, s2) - - // Pass in the same CRLs to make sure we do not duplicate entries - resp, err := CBWrite(b1, s1, "issuer/default/resign-crls", map[string]interface{}{ - "crl_number": "2", - "next_update": "1h", - "format": "pem", - "crls": []string{crl1, crl1}, - }) - requireSuccessNonNilResponse(t, resp, err) - requireFieldsSetInResp(t, resp, "crl") - pemCrl := resp.Data["crl"].(string) - combinedCrl, err := decodePemCrl(pemCrl) - require.NoError(t, err, "failed decoding combined CRL") - - // Technically this will die if we have duplicates. - serials := extractSerialsFromCrl(t, combinedCrl) - - // We should have no warnings about collisions if they have the same revoked time - require.Empty(t, resp.Warnings, "expected no warnings in response") - - require.Contains(t, serials, serial1) - require.Equal(t, 1, len(serials), "serials contained more serials than expected") -} - -func TestResignCrls_ConflictingExpiry(t *testing.T) { - t.Parallel() - b1, s1 := CreateBackendWithStorage(t) - b2, s2 := CreateBackendWithStorage(t) - - // Setup two backends, with the same key material/certificate with a different leaf in each that is revoked. 
- _, serial1, serial2, crl1, _ := setupResignCrlMounts(t, b1, s1, b2, s2) - - timeAfterMountSetup := time.Now() - - // Read in serial1 from mount 1 - resp, err := CBRead(b1, s1, "cert/"+serial1) - requireSuccessNonNilResponse(t, resp, err, "failed reading serial 1's certificate") - requireFieldsSetInResp(t, resp, "certificate") - cert1 := resp.Data["certificate"].(string) - - // Wait until at least we have rolled over to the next second to match sure the generated CRL time - // on backend 2 for the serial 1 will be different - for { - if time.Now().After(timeAfterMountSetup.Add(1 * time.Second)) { - break - } - } - - // Use BYOC to revoke the same certificate on backend 2 now - resp, err = CBWrite(b2, s2, "revoke", map[string]interface{}{ - "certificate": cert1, - }) - requireSuccessNonNilResponse(t, resp, err, "failed revoking serial 1 on backend 2") - - // Fetch the new CRL from backend2 now - resp, err = CBRead(b2, s2, "cert/crl") - requireSuccessNonNilResponse(t, resp, err, "error fetch crl from backend 2") - requireFieldsSetInResp(t, resp, "certificate") - crl2 := resp.Data["certificate"].(string) - - // Attempt to combine the CRLs - resp, err = CBWrite(b1, s1, "issuer/default/resign-crls", map[string]interface{}{ - "crl_number": "2", - "next_update": "1h", - "format": "pem", - "crls": []string{crl2, crl1}, // Make sure we don't just grab the first colliding entry... 
- }) - requireSuccessNonNilResponse(t, resp, err) - requireFieldsSetInResp(t, resp, "crl") - pemCrl := resp.Data["crl"].(string) - combinedCrl, err := decodePemCrl(pemCrl) - require.NoError(t, err, "failed decoding combined CRL") - combinedSerials := extractSerialsFromCrl(t, combinedCrl) - - require.Contains(t, combinedSerials, serial1) - require.Contains(t, combinedSerials, serial2) - require.Equal(t, 2, len(combinedSerials), "serials contained more serials than expected") - - // Make sure we issued a warning about the time collision - require.NotEmpty(t, resp.Warnings, "expected at least one warning") - require.Contains(t, resp.Warnings[0], "different revocation times detected") - - // Make sure we have the initial revocation time from backend 1 within the combined CRL. - decodedCrl1, err := decodePemCrl(crl1) - require.NoError(t, err, "failed decoding crl from backend 1") - serialsFromBackend1 := extractSerialsFromCrl(t, decodedCrl1) - - require.Equal(t, serialsFromBackend1[serial1], combinedSerials[serial1]) - - // Make sure we have the initial revocation time from backend 1 does not match with backend 2's time - decodedCrl2, err := decodePemCrl(crl2) - require.NoError(t, err, "failed decoding crl from backend 2") - serialsFromBackend2 := extractSerialsFromCrl(t, decodedCrl2) - - require.NotEqual(t, serialsFromBackend1[serial1], serialsFromBackend2[serial1]) -} - -func TestResignCrls_DeltaCrl(t *testing.T) { - t.Parallel() - - b1, s1 := CreateBackendWithStorage(t) - b2, s2 := CreateBackendWithStorage(t) - - // Setup two backends, with the same key material/certificate with a different leaf in each that is revoked. 
- caCert, serial1, serial2, crl1, crl2 := setupResignCrlMounts(t, b1, s1, b2, s2) - - resp, err := CBWrite(b1, s1, "issuer/default/resign-crls", map[string]interface{}{ - "crl_number": "5", - "delta_crl_base_number": "4", - "next_update": "12h", - "format": "pem", - "crls": []string{crl1, crl2}, - }) - requireSuccessNonNilResponse(t, resp, err) - requireFieldsSetInResp(t, resp, "crl") - pemCrl := resp.Data["crl"].(string) - combinedCrl, err := decodePemCrl(pemCrl) - require.NoError(t, err, "failed decoding combined CRL") - serials := extractSerialsFromCrl(t, combinedCrl) - - require.Contains(t, serials, serial1) - require.Contains(t, serials, serial2) - require.Equal(t, 2, len(serials), "serials contained more serials than expected") - - require.Equal(t, big.NewInt(int64(5)), combinedCrl.Number) - require.Equal(t, combinedCrl.ThisUpdate.Add(12*time.Hour), combinedCrl.NextUpdate) - - extensions := combinedCrl.Extensions - requireExtensionOid(t, []int{2, 5, 29, 27}, extensions) // Delta CRL Extension - requireExtensionOid(t, []int{2, 5, 29, 20}, extensions) // CRL Number Extension - requireExtensionOid(t, []int{2, 5, 29, 35}, extensions) // akidOid - require.Equal(t, 3, len(extensions)) - - err = combinedCrl.CheckSignatureFrom(caCert) - require.NoError(t, err, "failed signature check of CRL") -} - -func TestSignRevocationList(t *testing.T) { - t.Parallel() - - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // Mount PKI, use this form of backend so our request is closer to reality (json parsed) - err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "60h", - }, - }) - require.NoError(t, err) - - // Generate internal CA. 
- resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - }) - require.NoError(t, err) - caCert := parseCert(t, resp.Data["certificate"].(string)) - - resp, err = client.Logical().Write("pki/issuer/default/sign-revocation-list", map[string]interface{}{ - "crl_number": "1", - "next_update": "12h", - "format": "pem", - "revoked_certs": []map[string]interface{}{ - { - "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", - "revocation_time": "1668614976", - "extensions": []map[string]interface{}{}, - }, - { - "serial_number": "27:03:89:76:5a:d4:d8:19:48:47:ca:96:db:6f:27:86:31:92:9f:82", - "revocation_time": "2022-11-16T16:09:36.739592Z", - }, - { - "serial_number": "27:03:89:76:5a:d4:d8:19:48:47:ca:96:db:6f:27:86:31:92:9f:81", - "revocation_time": "2022-10-16T16:09:36.739592Z", - "extensions": []map[string]interface{}{ - { - "id": "2.5.29.100", - "critical": "true", - "value": "aGVsbG8=", // "hello" base64 encoded - }, - { - "id": "2.5.29.101", - "critical": "false", - "value": "Ynll", // "bye" base64 encoded - }, - }, - }, - }, - "extensions": []map[string]interface{}{ - { - "id": "2.5.29.200", - "critical": "true", - "value": "aGVsbG8=", // "hello" base64 encoded - }, - { - "id": "2.5.29.201", - "critical": "false", - "value": "Ynll", // "bye" base64 encoded - }, - }, - }) - require.NoError(t, err) - pemCrl := resp.Data["crl"].(string) - crl, err := decodePemCrl(pemCrl) - require.NoError(t, err, "failed decoding CRL") - serials := extractSerialsFromCrl(t, crl) - require.Contains(t, serials, "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8") - require.Contains(t, serials, "27:03:89:76:5a:d4:d8:19:48:47:ca:96:db:6f:27:86:31:92:9f:82") - require.Contains(t, serials, "27:03:89:76:5a:d4:d8:19:48:47:ca:96:db:6f:27:86:31:92:9f:81") - require.Equal(t, 3, len(serials), "expected 3 serials within CRL") - - // Make sure extensions on serials match what we 
expect. - require.Equal(t, 0, len(crl.RevokedCertificates[0].Extensions), "Expected no extensions on 1st serial") - require.Equal(t, 0, len(crl.RevokedCertificates[1].Extensions), "Expected no extensions on 2nd serial") - require.Equal(t, 2, len(crl.RevokedCertificates[2].Extensions), "Expected 2 extensions on 3 serial") - require.Equal(t, "2.5.29.100", crl.RevokedCertificates[2].Extensions[0].Id.String()) - require.True(t, crl.RevokedCertificates[2].Extensions[0].Critical) - require.Equal(t, []byte("hello"), crl.RevokedCertificates[2].Extensions[0].Value) - - require.Equal(t, "2.5.29.101", crl.RevokedCertificates[2].Extensions[1].Id.String()) - require.False(t, crl.RevokedCertificates[2].Extensions[1].Critical) - require.Equal(t, []byte("bye"), crl.RevokedCertificates[2].Extensions[1].Value) - - // CRL Number and times - require.Equal(t, big.NewInt(int64(1)), crl.Number) - require.Equal(t, crl.ThisUpdate.Add(12*time.Hour), crl.NextUpdate) - - // Verify top level extensions are present - extensions := crl.Extensions - requireExtensionOid(t, []int{2, 5, 29, 20}, extensions) // CRL Number Extension - requireExtensionOid(t, []int{2, 5, 29, 35}, extensions) // akidOid - requireExtensionOid(t, []int{2, 5, 29, 200}, extensions) // Added value from param - requireExtensionOid(t, []int{2, 5, 29, 201}, extensions) // Added value from param - require.Equal(t, 4, len(extensions)) - - // Signature - err = crl.CheckSignatureFrom(caCert) - require.NoError(t, err, "failed signature check of CRL") -} - -func TestSignRevocationList_NoRevokedCerts(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "test.com", - }) - requireSuccessNonNilResponse(t, resp, err) - - resp, err = CBWrite(b, s, "issuer/default/sign-revocation-list", map[string]interface{}{ - "crl_number": "10000", - "next_update": "12h", - "format": "pem", - }) - schema.ValidateResponse(t, 
schema.GetResponseSchema(t, b.Route("issuer/default/sign-revocation-list"), logical.UpdateOperation), resp, true) - requireSuccessNonNilResponse(t, resp, err) - requireFieldsSetInResp(t, resp, "crl") - pemCrl := resp.Data["crl"].(string) - crl, err := decodePemCrl(pemCrl) - require.NoError(t, err, "failed decoding CRL") - - serials := extractSerialsFromCrl(t, crl) - require.Equal(t, 0, len(serials), "no serials were expected in CRL") - - require.Equal(t, big.NewInt(int64(10000)), crl.Number) - require.Equal(t, crl.ThisUpdate.Add(12*time.Hour), crl.NextUpdate) -} - -func TestSignRevocationList_ReservedExtensions(t *testing.T) { - t.Parallel() - - reservedOids := []asn1.ObjectIdentifier{ - akOid, deltaCrlOid, crlNumOid, - } - // Validate there isn't copy/paste issues with our constants... - require.Equal(t, asn1.ObjectIdentifier{2, 5, 29, 27}, deltaCrlOid) // Delta CRL Extension - require.Equal(t, asn1.ObjectIdentifier{2, 5, 29, 20}, crlNumOid) // CRL Number Extension - require.Equal(t, asn1.ObjectIdentifier{2, 5, 29, 35}, akOid) // akidOid - - for _, reservedOid := range reservedOids { - t.Run(reservedOid.String(), func(t *testing.T) { - b, s := CreateBackendWithStorage(t) - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "test.com", - }) - requireSuccessNonNilResponse(t, resp, err) - - resp, err = CBWrite(b, s, "issuer/default/sign-revocation-list", map[string]interface{}{ - "crl_number": "1", - "next_update": "12h", - "format": "pem", - "extensions": []map[string]interface{}{ - { - "id": reservedOid.String(), - "critical": "false", - "value": base64.StdEncoding.EncodeToString([]byte("hello")), - }, - }, - }) - - require.ErrorContains(t, err, "is reserved") - }) - } -} - -func setupResignCrlMounts(t *testing.T, b1 *backend, s1 logical.Storage, b2 *backend, s2 logical.Storage) (*x509.Certificate, string, string, string, string) { - t.Helper() - - // Setup two mounts with the same CA/key material - resp, err := 
CBWrite(b1, s1, "root/generate/exported", map[string]interface{}{ - "common_name": "test.com", - }) - requireSuccessNonNilResponse(t, resp, err) - requireFieldsSetInResp(t, resp, "certificate", "private_key") - pemCaCert := resp.Data["certificate"].(string) - caCert := parseCert(t, pemCaCert) - privKey := resp.Data["private_key"].(string) - - // Import the above key/cert into another mount - resp, err = CBWrite(b2, s2, "config/ca", map[string]interface{}{ - "pem_bundle": pemCaCert + "\n" + privKey, - }) - requireSuccessNonNilResponse(t, resp, err, "error setting up CA on backend 2") - - // Create the same role in both mounts - resp, err = CBWrite(b1, s1, "roles/test", map[string]interface{}{ - "allowed_domains": "test.com", - "allow_subdomains": "true", - "max_ttl": "1h", - }) - requireSuccessNonNilResponse(t, resp, err, "error setting up pki role on backend 1") - - resp, err = CBWrite(b2, s2, "roles/test", map[string]interface{}{ - "allowed_domains": "test.com", - "allow_subdomains": "true", - "max_ttl": "1h", - }) - requireSuccessNonNilResponse(t, resp, err, "error setting up pki role on backend 2") - - // Issue and revoke a cert in backend 1 - resp, err = CBWrite(b1, s1, "issue/test", map[string]interface{}{ - "common_name": "test1.test.com", - }) - requireSuccessNonNilResponse(t, resp, err, "error issuing cert from backend 1") - requireFieldsSetInResp(t, resp, "serial_number") - serial1 := resp.Data["serial_number"].(string) - - resp, err = CBWrite(b1, s1, "revoke", map[string]interface{}{"serial_number": serial1}) - requireSuccessNonNilResponse(t, resp, err, "error revoking cert from backend 2") - - // Issue and revoke a cert in backend 2 - resp, err = CBWrite(b2, s2, "issue/test", map[string]interface{}{ - "common_name": "test1.test.com", - }) - requireSuccessNonNilResponse(t, resp, err, "error issuing cert from backend 2") - requireFieldsSetInResp(t, resp, "serial_number") - serial2 := resp.Data["serial_number"].(string) - - resp, err = CBWrite(b2, s2, 
"revoke", map[string]interface{}{"serial_number": serial2}) - requireSuccessNonNilResponse(t, resp, err, "error revoking cert from backend 2") - - // Fetch PEM CRLs from each - resp, err = CBRead(b1, s1, "cert/crl") - requireSuccessNonNilResponse(t, resp, err, "error fetch crl from backend 1") - requireFieldsSetInResp(t, resp, "certificate") - crl1 := resp.Data["certificate"].(string) - - resp, err = CBRead(b2, s2, "cert/crl") - requireSuccessNonNilResponse(t, resp, err, "error fetch crl from backend 2") - requireFieldsSetInResp(t, resp, "certificate") - crl2 := resp.Data["certificate"].(string) - return caCert, serial1, serial2, crl1, crl2 -} - -func requireExtensionOid(t *testing.T, identifier asn1.ObjectIdentifier, extensions []pkix.Extension, msgAndArgs ...interface{}) { - t.Helper() - - found := false - var oidsInExtensions []string - for _, extension := range extensions { - oidsInExtensions = append(oidsInExtensions, extension.Id.String()) - if extension.Id.Equal(identifier) { - found = true - break - } - } - - if !found { - msg := fmt.Sprintf("Failed to find matching asn oid %s out of %v", identifier.String(), oidsInExtensions) - require.Fail(t, msg, msgAndArgs) - } -} - -func extractSerialsFromCrl(t *testing.T, crl *x509.RevocationList) map[string]time.Time { - t.Helper() - - serials := map[string]time.Time{} - - for _, revokedCert := range crl.RevokedCertificates { - serial := serialFromBigInt(revokedCert.SerialNumber) - if _, exists := serials[serial]; exists { - t.Fatalf("Serial number %s was duplicated in CRL", serial) - } - serials[serial] = revokedCert.RevocationTime - } - return serials -} diff --git a/builtin/logical/pki/path_revoke.go b/builtin/logical/pki/path_revoke.go index 7aa23bc723052..d33c013caf9f3 100644 --- a/builtin/logical/pki/path_revoke.go +++ b/builtin/logical/pki/path_revoke.go @@ -1,11 +1,7 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( "context" - "crypto" "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" @@ -13,79 +9,18 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "net/http" "strings" - "time" - - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) -func pathListCertsRevoked(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "certs/revoked/?$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "revoked-certs", - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ListOperation: &framework.PathOperation{ - Callback: b.pathListRevokedCertsHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Description: `List of Keys`, - Required: false, - }, - }, - }}, - }, - }, - }, - - HelpSynopsis: pathListRevokedHelpSyn, - HelpDescription: pathListRevokedHelpDesc, - } -} - -func pathListCertsRevocationQueue(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "certs/revocation-queue/?$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "certs-revocation-queue", - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ListOperation: &framework.PathOperation{ - Callback: b.pathListRevocationQueueHandler, - }, - }, - - HelpSynopsis: pathListRevocationQueueHelpSyn, - HelpDescription: pathListRevocationQueueHelpDesc, - } -} - func pathRevoke(b *backend) *framework.Path { return &framework.Path{ Pattern: `revoke`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - 
OperationVerb: "revoke", - }, - Fields: map[string]*framework.FieldSchema{ "serial_number": { Type: framework.TypeString, @@ -106,28 +41,6 @@ signed by an issuer in this mount.`, // If this needs to write, the entire request will be forwarded to the // active node of the current performance cluster, but we don't want to // forward invalid revoke requests there. - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "revocation_time": { - Type: framework.TypeDurationSecond, - Description: `Revocation Time`, - Required: false, - }, - "revocation_time_rfc3339": { - Type: framework.TypeTime, - Description: `Revocation Time`, - Required: false, - }, - "state": { - Type: framework.TypeString, - Description: `Revocation State`, - Required: false, - }, - }, - }}, - }, }, }, @@ -139,13 +52,6 @@ signed by an issuer in this mount.`, func pathRevokeWithKey(b *backend) *framework.Path { return &framework.Path{ Pattern: `revoke-with-key`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "revoke", - OperationSuffix: "with-key", - }, - Fields: map[string]*framework.FieldSchema{ "serial_number": { Type: framework.TypeString, @@ -171,28 +77,6 @@ be in PEM format.`, // If this needs to write, the entire request will be forwarded to the // active node of the current performance cluster, but we don't want to // forward invalid revoke requests there. 
- Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "revocation_time": { - Type: framework.TypeDurationSecond, - Description: `Revocation Time`, - Required: false, - }, - "revocation_time_rfc3339": { - Type: framework.TypeTime, - Description: `Revocation Time`, - Required: false, - }, - "state": { - Type: framework.TypeString, - Description: `Revocation State`, - Required: false, - }, - }, - }}, - }, }, }, @@ -205,12 +89,6 @@ func pathRotateCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `crl/rotate`, - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "rotate", - OperationSuffix: "crl", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathRotateCRLRead, @@ -218,18 +96,6 @@ func pathRotateCRL(b *backend) *framework.Path { // so this request should be forwarded when it is first seen, not // when it is ready to write. ForwardPerformanceStandby: true, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "success": { - Type: framework.TypeBool, - Description: `Whether rotation was successful`, - Required: true, - }, - }, - }}, - }, }, }, @@ -242,12 +108,6 @@ func pathRotateDeltaCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `crl/rotate-delta`, - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "rotate", - OperationSuffix: "delta-crl", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathRotateDeltaCRLRead, @@ -255,18 +115,6 @@ func pathRotateDeltaCRL(b *backend) *framework.Path { // so this request should be forwarded when it is first seen, not // when it is ready to write. 
ForwardPerformanceStandby: true, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "success": { - Type: framework.TypeBool, - Description: `Whether rotation was successful`, - Required: true, - }, - }, - }}, - }, }, }, @@ -275,44 +123,7 @@ func pathRotateDeltaCRL(b *backend) *framework.Path { } } -func pathListUnifiedRevoked(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "certs/unified-revoked/?$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "unified-revoked-certs", - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ListOperation: &framework.PathOperation{ - Callback: b.pathListUnifiedRevokedCertsHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Description: `List of Keys`, - Required: false, - }, - "key_info": { - Type: framework.TypeString, - Description: `Key information`, - Required: false, - }, - }, - }}, - }, - }, - }, - - HelpSynopsis: pathListUnifiedRevokedHelpSyn, - HelpDescription: pathListUnifiedRevokedHelpDesc, - } -} - -func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *logical.Request, certPem string) (string, bool, *x509.Certificate, error) { +func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *logical.Request, certPem string) (string, bool, []byte, error) { // This function handles just the verification of the certificate against // the global issuer set, checking whether or not it is importable. // @@ -357,8 +168,7 @@ func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *log // // Start with the latter since its cheaper. Fetch the cert (by serial) // and if it exists, compare the contents. 
- sc := b.makeStorageContext(ctx, req.Storage) - certEntry, err := fetchCertBySerial(sc, "certs/", serial) + certEntry, err := fetchCertBySerial(ctx, b, req, req.Path, serial) if err != nil { return serial, false, nil, err } @@ -382,7 +192,7 @@ func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *log // imported this certificate, likely when we issued it. We don't // need to re-verify the signature as we assume it was already // verified when it was imported. - return serial, false, certReferenceStored, nil + return serial, false, certEntry.Value, nil } } @@ -390,6 +200,7 @@ func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *log // parameter (except in error cases) should cause the cert to write out. // // Fetch and iterate through each issuer. + sc := b.makeStorageContext(ctx, req.Storage) issuers, err := sc.listIssuers() if err != nil { return serial, false, nil, err @@ -416,13 +227,13 @@ func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *log } if foundMatchingIssuer { - return serial, true, certReference, nil + return serial, true, certReference.Raw, nil } return serial, false, nil, errutil.UserError{Err: "unable to verify signature on presented cert from any present issuer in this mount; certificates from previous CAs will need to have their issuing CA and key re-imported if revocation is necessary"} } -func (b *backend) pathRevokeWriteHandleKey(req *logical.Request, certReference *x509.Certificate, keyPem string) error { +func (b *backend) pathRevokeWriteHandleKey(ctx context.Context, req *logical.Request, cert []byte, keyPem string) error { if keyPem == "" { // The only way to get here should be via the /revoke endpoint; // validate the path one more time and return an error if necessary. @@ -435,6 +246,12 @@ func (b *backend) pathRevokeWriteHandleKey(req *logical.Request, certReference * return nil } + // Parse the certificate for reference. 
+ certReference, err := x509.ParseCertificate(cert) + if err != nil { + return errutil.UserError{Err: fmt.Sprintf("certificate could not be parsed: %v", err)} + } + // Now parse the key's PEM block. pemBlock, _ := pem.Decode([]byte(keyPem)) if pemBlock == nil { @@ -444,13 +261,9 @@ func (b *backend) pathRevokeWriteHandleKey(req *logical.Request, certReference * // Parse the inner DER key. signer, _, err := certutil.ParseDERKey(pemBlock.Bytes) if err != nil { - return fmt.Errorf("failed to parse provided private key: %w", err) + return fmt.Errorf("failed to parse provided private key: %v", err) } - return validatePrivateKeyMatchesCert(signer, certReference) -} - -func validatePrivateKeyMatchesCert(signer crypto.Signer, certReference *x509.Certificate) error { // Finally, verify if the cert and key match. This code has been // cribbed from the Go TLS config code, with minor modifications. // @@ -458,6 +271,7 @@ func validatePrivateKeyMatchesCert(signer crypto.Signer, certReference *x509.Cer // components and ensure we validate exponent and curve information // as well. // + // // See: https://github.com/golang/go/blob/c6a2dada0df8c2d75cf3ae599d7caed77d416fa2/src/crypto/tls/tls.go#L304-L331 switch certPub := certReference.PublicKey.(type) { case *rsa.PublicKey: @@ -494,47 +308,9 @@ func validatePrivateKeyMatchesCert(signer crypto.Signer, certReference *x509.Cer return nil } -func (b *backend) maybeRevokeCrossCluster(sc *storageContext, config *crlConfig, serial string, havePrivateKey bool) (*logical.Response, error) { - if !config.UseGlobalQueue { - return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found.", serial)), nil - } - - if havePrivateKey { - return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found, "+ - "and cross-cluster revocation not supported with key revocation.", serial)), nil - } - - // Here, we have to use the global revocation queue as the cert - // was not found on this current cluster. 
- currTime := time.Now() - nSerial := normalizeSerial(serial) - queueReq := revocationRequest{ - RequestedAt: currTime, - } - path := crossRevocationPath + nSerial - - reqEntry, err := logical.StorageEntryJSON(path, queueReq) - if err != nil { - return nil, fmt.Errorf("failed to create storage entry for cross-cluster revocation request: %w", err) - } - - if err := sc.Storage.Put(sc.Context, reqEntry); err != nil { - return nil, fmt.Errorf("error persisting cross-cluster revocation request: %w", err) - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "state": "pending", - }, - } - resp.AddWarning("Revocation request was not found on this present node. This request will be in a pending state until the PR cluster which issued this certificate sees the request and revokes the certificate. If no online cluster has this certificate, the request will eventually be removed without revoking any certificates.") - return resp, nil -} - func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, data *framework.FieldData, _ *roleEntry) (*logical.Response, error) { rawSerial, haveSerial := data.GetOk("serial_number") rawCertificate, haveCert := data.GetOk("certificate") - sc := b.makeStorageContext(ctx, req.Storage) if !haveSerial && !haveCert { return logical.ErrorResponse("The serial number or certificate to revoke must be provided."), nil @@ -556,28 +332,16 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat } } - writeCert := false - var cert *x509.Certificate var serial string - - config, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) - if err != nil { - return nil, fmt.Errorf("error revoking serial: %s: failed reading config: %w", serial, err) - } - - if haveCert { - serial, writeCert, cert, err = b.pathRevokeWriteHandleCertificate(ctx, req, rawCertificate.(string)) - if err != nil { - return nil, err - } - } else { + if haveSerial { // Easy case: this cert should be in storage already. 
serial = rawSerial.(string) if len(serial) == 0 { return logical.ErrorResponse("The serial number must be provided"), nil } - certEntry, err := fetchCertBySerial(sc, "certs/", serial) + // Here, fetch the certificate from disk to validate we can revoke it. + certEntry, err := fetchCertBySerial(ctx, b, req, req.Path, serial) if err != nil { switch err.(type) { case errutil.UserError: @@ -586,55 +350,49 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat return nil, err } } + if certEntry == nil { + return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found or was already revoked", serial)), nil + } - if certEntry != nil { - cert, err = x509.ParseCertificate(certEntry.Value) - if err != nil { - return nil, fmt.Errorf("error parsing certificate: %w", err) - } + // Now, if the user provided a key, we'll have to make sure the key + // and stored certificate match. + if err := b.pathRevokeWriteHandleKey(ctx, req, certEntry.Value, keyPem); err != nil { + return nil, err + } + } else { + // Otherwise, we've gotta parse the certificate from the request and + // then import it into cluster-local storage. Before writing the + // certificate (and forwarding), we want to verify this certificate + // was actually signed by one of our present issuers. + var err error + var writeCert bool + var certBytes []byte + serial, writeCert, certBytes, err = b.pathRevokeWriteHandleCertificate(ctx, req, rawCertificate.(string)) + if err != nil { + return nil, err } - } - if cert == nil { - if config.UnifiedCRL { - // Saving grace if we aren't able to load the certificate locally/or were given it, - // if we have a unified revocation entry already return its revocation times, - // otherwise we fail with a certificate not found message. 
- unifiedRev, err := getUnifiedRevocationBySerial(sc, normalizeSerial(serial)) + // Before we write the certificate, we've gotta verify the request in + // the event of a PoP-based revocation scheme; we don't want to litter + // storage with issued-but-not-revoked certificates. + if err := b.pathRevokeWriteHandleKey(ctx, req, certBytes, keyPem); err != nil { + return nil, err + } + + // At this point, a forward operation will occur if we're on a standby + // node as we're now attempting to write the bytes of the cert out to + // disk. + if writeCert { + err = req.Storage.Put(ctx, &logical.StorageEntry{ + Key: "certs/" + serial, + Value: certBytes, + }) if err != nil { return nil, err } - if unifiedRev != nil { - return &logical.Response{ - Data: map[string]interface{}{ - "revocation_time": unifiedRev.RevocationTimeUTC.Unix(), - "revocation_time_rfc3339": unifiedRev.RevocationTimeUTC.Format(time.RFC3339Nano), - }, - }, nil - } } - return b.maybeRevokeCrossCluster(sc, config, serial, keyPem != "") - } - - // Before we write the certificate, we've gotta verify the request in - // the event of a PoP-based revocation scheme; we don't want to litter - // storage with issued-but-not-revoked certificates. - if err := b.pathRevokeWriteHandleKey(req, cert, keyPem); err != nil { - return nil, err - } - - // At this point, a forward operation will occur if we're on a standby - // node as we're now attempting to write the bytes of the cert out to - // disk. - if writeCert { - err := req.Storage.Put(ctx, &logical.StorageEntry{ - Key: "certs/" + normalizeSerial(serial), - Value: cert.Raw, - }) - if err != nil { - return nil, err - } + // Finally, we have a valid serial number to use for BYOC revocation! } // Assumption: this check is cheap. 
Call this twice, in the cert-import @@ -644,18 +402,21 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat return nil, logical.ErrReadOnly } + // We store and identify by lowercase colon-separated hex, but other + // utilities use dashes and/or uppercase, so normalize + serial = strings.ReplaceAll(strings.ToLower(serial), "-", ":") + b.revokeStorageLock.Lock() defer b.revokeStorageLock.Unlock() - return revokeCert(sc, config, cert) + return revokeCert(ctx, b, req, serial, false) } func (b *backend) pathRotateCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { b.revokeStorageLock.RLock() defer b.revokeStorageLock.RUnlock() - sc := b.makeStorageContext(ctx, req.Storage) - warnings, crlErr := b.crlBuilder.rebuild(sc, false) + crlErr := b.crlBuilder.rebuild(ctx, b, req, false) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -665,17 +426,11 @@ func (b *backend) pathRotateCRLRead(ctx context.Context, req *logical.Request, _ } } - resp := &logical.Response{ + return &logical.Response{ Data: map[string]interface{}{ "success": true, }, - } - - for index, warning := range warnings { - resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) - } - - return resp, nil + }, nil } func (b *backend) pathRotateDeltaCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { @@ -683,12 +438,12 @@ func (b *backend) pathRotateDeltaCRLRead(ctx context.Context, req *logical.Reque cfg, err := b.crlBuilder.getConfigWithUpdate(sc) if err != nil { - return nil, fmt.Errorf("error fetching CRL configuration: %w", err) + return nil, fmt.Errorf("error fetching CRL configuration: %v", err) } isEnabled := cfg.EnableDelta - warnings, crlErr := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, true) + crlErr := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, true) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ 
-707,119 +462,10 @@ func (b *backend) pathRotateDeltaCRLRead(ctx context.Context, req *logical.Reque if !isEnabled { resp.AddWarning("requested rebuild of delta CRL when delta CRL is not enabled; this is a no-op") } - for index, warning := range warnings { - resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) - } return resp, nil } -func (b *backend) pathListRevokedCertsHandler(ctx context.Context, request *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, request.Storage) - - revokedCerts, err := sc.listRevokedCerts() - if err != nil { - return nil, err - } - - // Normalize serial back to a format people are expecting. - for i, serial := range revokedCerts { - revokedCerts[i] = denormalizeSerial(serial) - } - - return logical.ListResponse(revokedCerts), nil -} - -func (b *backend) pathListRevocationQueueHandler(ctx context.Context, request *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - var responseKeys []string - responseInfo := make(map[string]interface{}) - - sc := b.makeStorageContext(ctx, request.Storage) - - clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix) - if err != nil { - return nil, fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err) - } - - for cIndex, cluster := range clusters { - cluster = cluster[0 : len(cluster)-1] - cPath := crossRevocationPrefix + cluster + "/" - serials, err := sc.Storage.List(sc.Context, cPath) - if err != nil { - return nil, fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err) - } - - for _, serial := range serials { - // Always strip the slash out; it indicates the presence of - // a confirmed revocation, which we add to the main serial's - // entry. 
- hasSlash := serial[len(serial)-1] == '/' - if hasSlash { - serial = serial[0 : len(serial)-1] - } - serial = denormalizeSerial(serial) - - var data map[string]interface{} - rawData, isPresent := responseInfo[serial] - if !isPresent { - data = map[string]interface{}{} - responseKeys = append(responseKeys, serial) - } else { - data = rawData.(map[string]interface{}) - } - - if hasSlash { - data["confirmed"] = true - data["confirmation_cluster"] = cluster - } else { - data["requesting_cluster"] = cluster - } - - responseInfo[serial] = data - } - } - - return logical.ListResponseWithInfo(responseKeys, responseInfo), nil -} - -func (b *backend) pathListUnifiedRevokedCertsHandler(ctx context.Context, request *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, request.Storage) - responseKeys := []string{} - responseInfo := make(map[string]interface{}) - - clusterPathsById, err := lookupUnifiedClusterPaths(sc) - if err != nil { - return nil, err - } - - for clusterId := range clusterPathsById { - clusterSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId) - if err != nil { - return nil, err - } - for _, serial := range clusterSerials { - if strings.HasSuffix(serial, "/") { - // Skip folders as they wouldn't be a proper revocation - continue - } - colonSerial := denormalizeSerial(serial) - var data map[string][]string - rawData, isPresent := responseInfo[colonSerial] - if !isPresent { - responseKeys = append(responseKeys, colonSerial) - data = map[string][]string{} - } else { - data = rawData.(map[string][]string) - } - - data["revoking_clusters"] = append(data["revoking_clusters"], clusterId) - responseInfo[colonSerial] = data - } - } - - return logical.ListResponseWithInfo(responseKeys, responseInfo), nil -} - const pathRevokeHelpSyn = ` Revoke a certificate by serial number or with explicit certificate. @@ -847,28 +493,3 @@ Force a rebuild of the delta CRL. 
const pathRotateDeltaCRLHelpDesc = ` Force a rebuild of the delta CRL. This can be used to force an update of the otherwise periodically-rebuilt delta CRLs. ` - -const pathListRevokedHelpSyn = ` -List all revoked serial numbers within the local cluster -` - -const pathListRevokedHelpDesc = ` -Returns a list of serial numbers for revoked certificates in the local cluster. -` - -const pathListUnifiedRevokedHelpSyn = ` -List all revoked serial numbers within this cluster's unified storage area. -` - -const pathListUnifiedRevokedHelpDesc = ` -Returns a list of serial numbers for revoked certificates within this cluster's unified storage. -` - -const pathListRevocationQueueHelpSyn = ` -List all pending, cross-cluster revocations known to the local cluster. -` - -const pathListRevocationQueueHelpDesc = ` -Returns a detailed list containing serial number, requesting cluster, and -optionally a confirming cluster. -` diff --git a/builtin/logical/pki/path_roles.go b/builtin/logical/pki/path_roles.go index 16564085e76f9..89a899a8c24dc 100644 --- a/builtin/logical/pki/path_roles.go +++ b/builtin/logical/pki/path_roles.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -8,7 +5,6 @@ import ( "crypto/x509" "encoding/json" "fmt" - "net/http" "strings" "time" @@ -24,26 +20,9 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "roles", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Description: "List of roles", - Required: true, - }, - }, - }}, - }, }, }, @@ -53,360 +32,8 @@ func pathListRoles(b *backend) *framework.Path { } func pathRoles(b *backend) *framework.Path { - pathRolesResponseFields := map[string]*framework.FieldSchema{ - "ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: `The lease duration (validity period of the -certificate) if no specific lease duration is requested. -The lease duration controls the expiration of certificates -issued by this backend. Defaults to the system default -value or the value of max_ttl, whichever is shorter.`, - }, - - "max_ttl": { - Type: framework.TypeDurationSecond, - Required: true, - Description: `The maximum allowed lease duration. 
If not -set, defaults to the system maximum lease TTL.`, - }, - "allow_token_displayname": { - Type: framework.TypeBool, - Required: true, - Description: `Whether to allow "localhost" and "localdomain" -as a valid common name in a request, independent of allowed_domains value.`, - }, - - "allow_localhost": { - Type: framework.TypeBool, - Required: true, - Description: `Whether to allow "localhost" and "localdomain" -as a valid common name in a request, independent of allowed_domains value.`, - }, - - "allowed_domains": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `Specifies the domains this role is allowed -to issue certificates for. This is used with the allow_bare_domains, -allow_subdomains, and allow_glob_domains to determine matches for the -common name, DNS-typed SAN entries, and Email-typed SAN entries of -certificates. See the documentation for more information. This parameter -accepts a comma-separated string or list of domains.`, - }, - "allowed_domains_template": { - Type: framework.TypeBool, - Required: true, - Description: `If set, Allowed domains can be specified using identity template policies. - Non-templated domains are also permitted.`, - }, - "allow_bare_domains": { - Type: framework.TypeBool, - Required: true, - Description: `If set, clients can request certificates -for the base domains themselves, e.g. "example.com" of domains listed -in allowed_domains. This is a separate option as in some cases this can -be considered a security threat. See the documentation for more -information.`, - }, - - "allow_subdomains": { - Type: framework.TypeBool, - Required: true, - Description: `If set, clients can request certificates for -subdomains of domains listed in allowed_domains, including wildcard -subdomains. 
See the documentation for more information.`, - }, - - "allow_glob_domains": { - Type: framework.TypeBool, - Required: true, - Description: `If set, domains specified in allowed_domains -can include shell-style glob patterns, e.g. "ftp*.example.com". -See the documentation for more information.`, - }, - - "allow_wildcard_certificates": { - Type: framework.TypeBool, - Required: true, - Description: `If set, allows certificates with wildcards in -the common name to be issued, conforming to RFC 6125's Section 6.4.3; e.g., -"*.example.net" or "b*z.example.net". See the documentation for more -information.`, - }, - - "allow_any_name": { - Type: framework.TypeBool, - Required: true, - Description: `If set, clients can request certificates for -any domain, regardless of allowed_domains restrictions. -See the documentation for more information.`, - }, - - "enforce_hostnames": { - Type: framework.TypeBool, - Required: true, - Description: `If set, only valid host names are allowed for -CN and DNS SANs, and the host part of email addresses. Defaults to true.`, - }, - - "allow_ip_sans": { - Type: framework.TypeBool, - Required: true, - Description: `If set, IP Subject Alternative Names are allowed. -Any valid IP is accepted and No authorization checking is performed.`, - }, - - "allowed_uri_sans": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `If set, an array of allowed URIs for URI Subject Alternative Names. -Any valid URI is accepted, these values support globbing.`, - }, - - "allowed_uri_sans_template": { - Type: framework.TypeBool, - Required: true, - Description: `If set, Allowed URI SANs can be specified using identity template policies. - Non-templated URI SANs are also permitted.`, - }, - - "allowed_other_sans": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format ;:. Currently only "utf8" is a valid type. 
All values, including globbing values, must use this syntax, with the exception being a single "*" which allows any OID and any value (but type must still be utf8).`, - }, - - "allowed_serial_numbers": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `If set, an array of allowed serial numbers to put in Subject. These values support globbing.`, - }, - "allowed_user_ids": { - Type: framework.TypeCommaStringSlice, - Description: `If set, an array of allowed user-ids to put in user system login name specified here: https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1`, - }, - "server_flag": { - Type: framework.TypeBool, - Default: true, - Description: `If set, certificates are flagged for server auth use. -Defaults to true. See also RFC 5280 Section 4.2.1.12.`, - }, - - "client_flag": { - Type: framework.TypeBool, - Required: true, - Description: `If set, certificates are flagged for client auth use. -Defaults to true. See also RFC 5280 Section 4.2.1.12.`, - }, - - "code_signing_flag": { - Type: framework.TypeBool, - Required: true, - Description: `If set, certificates are flagged for code signing -use. Defaults to false. See also RFC 5280 Section 4.2.1.12.`, - }, - - "email_protection_flag": { - Type: framework.TypeBool, - Required: true, - Description: `If set, certificates are flagged for email -protection use. Defaults to false. See also RFC 5280 Section 4.2.1.12.`, - }, - - "key_type": { - Type: framework.TypeString, - Required: true, - Description: `The type of key to use; defaults to RSA. "rsa" -"ec", "ed25519" and "any" are the only valid values.`, - }, - - "key_bits": { - Type: framework.TypeInt, - Required: true, - Description: `The number of bits to use. 
Allowed values are -0 (universal default); with rsa key_type: 2048 (default), 3072, or -4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with -ed25519.`, - }, - "signature_bits": { - Type: framework.TypeInt, - Required: true, - Description: `The number of bits to use in the signature -algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for -SHA-2-512. Defaults to 0 to automatically detect based on key length -(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, - }, - "use_pss": { - Type: framework.TypeBool, - Required: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. Defaults to false.`, - }, - "key_usage": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `A comma-separated string or list of key usages (not extended -key usages). Valid values can be found at -https://golang.org/pkg/crypto/x509/#KeyUsage --- simply drop the "KeyUsage" part of the name. -To remove all key usages from being set, set -this value to an empty list. See also RFC 5280 -Section 4.2.1.3.`, - }, - - "ext_key_usage": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `A comma-separated string or list of extended key usages. Valid values can be found at -https://golang.org/pkg/crypto/x509/#ExtKeyUsage --- simply drop the "ExtKeyUsage" part of the name. -To remove all key usages from being set, set -this value to an empty list. See also RFC 5280 -Section 4.2.1.12.`, - }, - - "ext_key_usage_oids": { - Type: framework.TypeCommaStringSlice, - Required: true, - Description: `A comma-separated string or list of extended key usage oids.`, - }, - - "use_csr_common_name": { - Type: framework.TypeBool, - Required: true, - Description: `If set, when used with a signing profile, -the common name in the CSR will be used. This -does *not* include any requested Subject Alternative -Names; use use_csr_sans for that. 
Defaults to true.`, - }, - - "use_csr_sans": { - Type: framework.TypeBool, - Required: true, - Description: `If set, when used with a signing profile, -the SANs in the CSR will be used. This does *not* -include the Common Name (cn); use use_csr_common_name -for that. Defaults to true.`, - }, - - "ou": { - Type: framework.TypeCommaStringSlice, - Description: `If set, OU (OrganizationalUnit) will be set to -this value in certificates issued by this role.`, - }, - - "organization": { - Type: framework.TypeCommaStringSlice, - Description: `If set, O (Organization) will be set to -this value in certificates issued by this role.`, - }, - - "country": { - Type: framework.TypeCommaStringSlice, - Description: `If set, Country will be set to -this value in certificates issued by this role.`, - }, - - "locality": { - Type: framework.TypeCommaStringSlice, - Description: `If set, Locality will be set to -this value in certificates issued by this role.`, - }, - - "province": { - Type: framework.TypeCommaStringSlice, - Description: `If set, Province will be set to -this value in certificates issued by this role.`, - }, - - "street_address": { - Type: framework.TypeCommaStringSlice, - Description: `If set, Street Address will be set to -this value in certificates issued by this role.`, - }, - - "postal_code": { - Type: framework.TypeCommaStringSlice, - Description: `If set, Postal Code will be set to -this value in certificates issued by this role.`, - }, - - "generate_lease": { - Type: framework.TypeBool, - Description: ` -If set, certificates issued/signed against this role will have Vault leases -attached to them. Defaults to "false". Certificates can be added to the CRL by -"vault revoke " when certificates are associated with leases. It can -also be done using the "pki/revoke" endpoint. However, when lease generation is -disabled, invoking "pki/revoke" would be the only way to add the certificates -to the CRL. 
When large number of certificates are generated with long -lifetimes, it is recommended that lease generation be disabled, as large amount of -leases adversely affect the startup time of Vault.`, - }, - - "no_store": { - Type: framework.TypeBool, - Description: ` -If set, certificates issued/signed against this role will not be stored in the -storage backend. This can improve performance when issuing large numbers of -certificates. However, certificates issued in this way cannot be enumerated -or revoked, so this option is recommended only for certificates that are -non-sensitive, or extremely short-lived. This option implies a value of "false" -for "generate_lease".`, - }, - - "require_cn": { - Type: framework.TypeBool, - Description: `If set to false, makes the 'common_name' field optional while generating a certificate.`, - }, - - "cn_validations": { - Type: framework.TypeCommaStringSlice, - Description: `List of allowed validations to run against the -Common Name field. Values can include 'email' to validate the CN is a email -address, 'hostname' to validate the CN is a valid hostname (potentially -including wildcards). When multiple validations are specified, these take -OR semantics (either email OR hostname are allowed). 
The special value -'disabled' allows disabling all CN name validations, allowing for arbitrary -non-Hostname, non-Email address CNs.`, - }, - - "policy_identifiers": { - Type: framework.TypeCommaStringSlice, - Description: `A comma-separated string or list of policy OIDs, or a JSON list of qualified policy -information, which must include an oid, and may include a notice and/or cps url, using the form -[{"oid"="1.3.6.1.4.1.7.8","notice"="I am a user Notice"}, {"oid"="1.3.6.1.4.1.44947.1.2.4 ","cps"="https://example.com"}].`, - }, - - "basic_constraints_valid_for_non_ca": { - Type: framework.TypeBool, - Description: `Mark Basic Constraints valid when issuing non-CA certificates.`, - }, - "not_before_duration": { - Type: framework.TypeDurationSecond, - Description: `The duration before now which the certificate needs to be backdated by.`, - }, - "not_after": { - Type: framework.TypeString, - Description: `Set the not after field of the certificate with specified date value. -The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ.`, - }, - "issuer_ref": { - Type: framework.TypeString, - Description: `Reference to the issuer used to sign requests -serviced by this role.`, - }, - } - return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "role", - }, - Fields: map[string]*framework.FieldSchema{ "backend": { Type: framework.TypeString, @@ -553,11 +180,6 @@ Any valid URI is accepted, these values support globbing.`, Description: `If set, an array of allowed serial numbers to put in Subject. 
These values support globbing.`, }, - "allowed_user_ids": { - Type: framework.TypeCommaStringSlice, - Description: `If set, an array of allowed user-ids to put in user system login name specified here: https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1`, - }, - "server_flag": { Type: framework.TypeBool, Default: true, @@ -823,44 +445,21 @@ serviced by this role.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathRoleRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: pathRolesResponseFields, - }}, - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRoleCreate, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: pathRolesResponseFields, - }}, - }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathRoleDelete, - Responses: map[int][]framework.Response{ - http.StatusNoContent: {{ - Description: "No Content", - }}, - }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.PatchOperation: &framework.PathOperation{ Callback: b.pathRolePatch, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: pathRolesResponseFields, - }}, - }, // Read more about why these flags are set in backend.go. 
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -1014,8 +613,6 @@ func (b *backend) getRole(ctx context.Context, s logical.Storage, n string) (*ro } } - result.Name = n - return &result, nil } @@ -1101,13 +698,11 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data RequireCN: data.Get("require_cn").(bool), CNValidations: data.Get("cn_validations").([]string), AllowedSerialNumbers: data.Get("allowed_serial_numbers").([]string), - AllowedUserIDs: data.Get("allowed_user_ids").([]string), PolicyIdentifiers: getPolicyIdentifier(data, nil), BasicConstraintsValidForNonCA: data.Get("basic_constraints_valid_for_non_ca").(bool), NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, NotAfter: data.Get("not_after").(string), Issuer: data.Get("issuer_ref").(string), - Name: name, } allowedOtherSANs := data.Get("allowed_other_sans").([]string) @@ -1150,6 +745,9 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data return nil, err } if warning != "" { + if resp == nil { + resp = &logical.Response{} + } resp.AddWarning(warning) } if resp.IsError() { @@ -1169,7 +767,7 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data } func validateRole(b *backend, entry *roleEntry, ctx context.Context, s logical.Storage) (*logical.Response, error) { - resp := &logical.Response{} + var resp *logical.Response var err error if entry.MaxTTL > 0 && entry.TTL > entry.MaxTTL { @@ -1230,7 +828,6 @@ func validateRole(b *backend, entry *roleEntry, ctx context.Context, s logical.S return nil, errutil.UserError{Err: err.Error()} } - resp.Data = entry.ToResponseData() return resp, nil } @@ -1301,7 +898,6 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data RequireCN: getWithExplicitDefault(data, "require_cn", oldEntry.RequireCN).(bool), CNValidations: getWithExplicitDefault(data, "cn_validations", oldEntry.CNValidations).([]string), 
AllowedSerialNumbers: getWithExplicitDefault(data, "allowed_serial_numbers", oldEntry.AllowedSerialNumbers).([]string), - AllowedUserIDs: getWithExplicitDefault(data, "allowed_user_ids", oldEntry.AllowedUserIDs).([]string), PolicyIdentifiers: getPolicyIdentifier(data, &oldEntry.PolicyIdentifiers), BasicConstraintsValidForNonCA: getWithExplicitDefault(data, "basic_constraints_valid_for_non_ca", oldEntry.BasicConstraintsValidForNonCA).(bool), NotBeforeDuration: getTimeWithExplicitDefault(data, "not_before_duration", oldEntry.NotBeforeDuration), @@ -1504,7 +1100,6 @@ type roleEntry struct { CNValidations []string `json:"cn_validations"` AllowedOtherSANs []string `json:"allowed_other_sans"` AllowedSerialNumbers []string `json:"allowed_serial_numbers"` - AllowedUserIDs []string `json:"allowed_user_ids"` AllowedURISANs []string `json:"allowed_uri_sans"` AllowedURISANsTemplate bool `json:"allowed_uri_sans_template"` PolicyIdentifiers []string `json:"policy_identifiers"` @@ -1513,8 +1108,6 @@ type roleEntry struct { NotBeforeDuration time.Duration `json:"not_before_duration"` NotAfter string `json:"not_after"` Issuer string `json:"issuer"` - // Name is only set when the role has been stored, on the fly roles have a blank name - Name string `json:"-"` } func (r *roleEntry) ToResponseData() map[string]interface{} { @@ -1556,7 +1149,6 @@ func (r *roleEntry) ToResponseData() map[string]interface{} { "no_store": r.NoStore, "allowed_other_sans": r.AllowedOtherSANs, "allowed_serial_numbers": r.AllowedSerialNumbers, - "allowed_user_ids": r.AllowedUserIDs, "allowed_uri_sans": r.AllowedURISANs, "require_cn": r.RequireCN, "cn_validations": r.CNValidations, diff --git a/builtin/logical/pki/path_roles_test.go b/builtin/logical/pki/path_roles_test.go index 98f5e277bbafa..34952d4772209 100644 --- a/builtin/logical/pki/path_roles_test.go +++ b/builtin/logical/pki/path_roles_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -14,7 +11,6 @@ import ( "github.com/go-errors/errors" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -24,7 +20,7 @@ func TestPki_RoleGenerateLease(t *testing.T) { t.Parallel() var resp *logical.Response var err error - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ "allowed_domains": "myvault.com", @@ -129,7 +125,7 @@ func TestPki_RoleKeyUsage(t *testing.T) { t.Parallel() var resp *logical.Response var err error - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ "allowed_domains": "myvault.com", @@ -145,14 +141,12 @@ func TestPki_RoleKeyUsage(t *testing.T) { } resp, err = b.HandleRequest(context.Background(), roleReq) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(roleReq.Path), logical.UpdateOperation), resp, true) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v resp: %#v", err, resp) } roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(roleReq.Path), logical.ReadOperation), resp, true) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v resp: %#v", err, resp) } @@ -222,7 +216,7 @@ func TestPki_RoleOUOrganizationUpgrade(t *testing.T) { t.Parallel() var resp *logical.Response var err error - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ "allowed_domains": "myvault.com", @@ -328,7 +322,7 @@ func TestPki_RoleAllowedDomains(t *testing.T) { t.Parallel() var resp *logical.Response var err error - b, storage := 
CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ "allowed_domains": []string{"foobar.com", "*example.com"}, @@ -416,7 +410,7 @@ func TestPki_RoleAllowedURISANs(t *testing.T) { t.Parallel() var resp *logical.Response var err error - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ "allowed_uri_sans": []string{"http://foobar.com", "spiffe://*"}, @@ -451,7 +445,7 @@ func TestPki_RolePkixFields(t *testing.T) { t.Parallel() var resp *logical.Response var err error - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ "ttl": "5h", @@ -543,7 +537,7 @@ func TestPki_RoleNoStore(t *testing.T) { t.Parallel() var resp *logical.Response var err error - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ "allowed_domains": "myvault.com", @@ -664,7 +658,7 @@ func TestPki_CertsLease(t *testing.T) { t.Parallel() var resp *logical.Response var err error - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) caData := map[string]interface{}{ "common_name": "myvault.com", @@ -955,7 +949,7 @@ func TestPki_RolePatch(t *testing.T) { }, } - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) for index, testCase := range testCases { var resp *logical.Response @@ -1051,7 +1045,7 @@ func TestPKI_RolePolicyInformation_Flat(t *testing.T) { }, } - b, storage := CreateBackendWithStorage(t) + b, storage := createBackendWithStorage(t) caData := map[string]interface{}{ "common_name": "myvault.com", diff --git a/builtin/logical/pki/path_root.go b/builtin/logical/pki/path_root.go index fc5476bef05f0..5f9ab837e5406 100644 --- a/builtin/logical/pki/path_root.go +++ b/builtin/logical/pki/path_root.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -15,7 +12,6 @@ import ( "encoding/pem" "errors" "fmt" - "net/http" "reflect" "strings" "time" @@ -30,34 +26,15 @@ import ( ) func pathGenerateRoot(b *backend) *framework.Path { - pattern := "root/generate/" + framework.GenericNameRegex("exported") - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "generate", - OperationSuffix: "root", - } - - return buildPathGenerateRoot(b, pattern, displayAttrs) + return buildPathGenerateRoot(b, "root/generate/"+framework.GenericNameRegex("exported")) } func pathDeleteRoot(b *backend) *framework.Path { ret := &framework.Path{ Pattern: "root", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationSuffix: "root", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathCADeleteRoot, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - }}, - }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -103,15 +80,11 @@ func (b *backend) pathCADeleteRoot(ctx context.Context, req *logical.Request, _ } } - // Delete legacy CA bundle and its backup, if any. + // Delete legacy CA bundle. if err := req.Storage.Delete(ctx, legacyCertBundlePath); err != nil { return nil, err } - if err := req.Storage.Delete(ctx, legacyCertBundleBackupPath); err != nil { - return nil, err - } - // Delete legacy CRL bundle. if err := req.Storage.Delete(ctx, legacyCRLPath); err != nil { return nil, err @@ -280,7 +253,7 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, // RevocationSigAlg unconditionally on roots now. 
myIssuer.RevocationSigAlg = parsedBundle.Certificate.SignatureAlgorithm if err := sc.writeIssuer(myIssuer); err != nil { - return nil, fmt.Errorf("unable to store PSS-updated issuer: %w", err) + return nil, fmt.Errorf("unable to store PSS-updated issuer: %v", err) } // Also store it as just the certificate identified by serial number, so it @@ -294,16 +267,13 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, if err != nil { return nil, fmt.Errorf("unable to store certificate locally: %w", err) } - b.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) + b.incrementTotalCertificatesCount(certsCounted, key) // Build a fresh CRL - warnings, err = b.crlBuilder.rebuild(sc, true) + err = b.crlBuilder.rebuild(ctx, b, req, true) if err != nil { return nil, err } - for index, warning := range warnings { - resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) - } if parsedBundle.Certificate.MaxPathLen == 0 { resp.AddWarning("Max path length of the generated certificate is zero. This certificate cannot be used to issue intermediate CA certificates.") @@ -494,7 +464,7 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R if err != nil { return nil, fmt.Errorf("unable to store certificate locally: %w", err) } - b.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) + b.incrementTotalCertificatesCount(certsCounted, key) if parsedBundle.Certificate.MaxPathLen == 0 { resp.AddWarning("Max path length of the signed certificate is zero. This certificate cannot be used to issue intermediate CA certificates.") diff --git a/builtin/logical/pki/path_sign_issuers.go b/builtin/logical/pki/path_sign_issuers.go index 0b6b8334e418f..cadbac5553f36 100644 --- a/builtin/logical/pki/path_sign_issuers.go +++ b/builtin/logical/pki/path_sign_issuers.go @@ -1,80 +1,28 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( - "net/http" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) func pathIssuerSignIntermediate(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-intermediate" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationVerb: "sign", - OperationSuffix: "intermediate", - } - - return buildPathIssuerSignIntermediateRaw(b, pattern, displayAttrs) + return buildPathIssuerSignIntermediateRaw(b, pattern) } func pathSignIntermediate(b *backend) *framework.Path { pattern := "root/sign-intermediate" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIRoot, - OperationVerb: "sign", - OperationSuffix: "intermediate", - } - - return buildPathIssuerSignIntermediateRaw(b, pattern, displayAttrs) + return buildPathIssuerSignIntermediateRaw(b, pattern) } -func buildPathIssuerSignIntermediateRaw(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathIssuerSignIntermediateRaw(b *backend, pattern string) *framework.Path { fields := addIssuerRefField(map[string]*framework.FieldSchema{}) path := &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, - Fields: fields, + Pattern: pattern, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathIssuerSignIntermediate, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "expiration": { - Type: framework.TypeInt64, - Description: `Expiration Time`, - Required: true, - }, - "serial_number": { - Type: framework.TypeString, - Description: `Serial Number`, - Required: false, - }, - "certificate": { - Type: framework.TypeString, - Description: `Certificate`, - Required: true, - }, - 
"issuing_ca": { - Type: framework.TypeString, - Description: `Issuing CA`, - Required: true, - }, - "ca_chain": { - Type: framework.TypeStringSlice, - Description: `CA Chain`, - Required: true, - }, - }, - }}, - }, }, }, @@ -165,29 +113,15 @@ See the API documentation for more information about required parameters. func pathIssuerSignSelfIssued(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-self-issued" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationVerb: "sign", - OperationSuffix: "self-issued", - } - - return buildPathIssuerSignSelfIssued(b, pattern, displayAttrs) + return buildPathIssuerSignSelfIssued(b, pattern) } func pathSignSelfIssued(b *backend) *framework.Path { pattern := "root/sign-self-issued" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIRoot, - OperationVerb: "sign", - OperationSuffix: "self-issued", - } - - return buildPathIssuerSignSelfIssued(b, pattern, displayAttrs) + return buildPathIssuerSignSelfIssued(b, pattern) } -func buildPathIssuerSignSelfIssued(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { +func buildPathIssuerSignSelfIssued(b *backend, pattern string) *framework.Path { fields := map[string]*framework.FieldSchema{ "certificate": { Type: framework.TypeString, @@ -201,29 +135,11 @@ func buildPathIssuerSignSelfIssued(b *backend, pattern string, displayAttrs *fra } fields = addIssuerRefField(fields) path := &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, - Fields: fields, + Pattern: pattern, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathIssuerSignSelfIssued, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "certificate": { - Type: 
framework.TypeString, - Description: `Certificate`, - Required: true, - }, - "issuing_ca": { - Type: framework.TypeString, - Description: `Issuing CA`, - Required: true, - }, - }, - }}, - }, }, }, diff --git a/builtin/logical/pki/path_tidy.go b/builtin/logical/pki/path_tidy.go index 22c406249c3c7..d65d5c8c792c7 100644 --- a/builtin/logical/pki/path_tidy.go +++ b/builtin/logical/pki/path_tidy.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -16,135 +13,38 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" ) var tidyCancelledError = errors.New("tidy operation cancelled") -type tidyStatusState int - -const ( - tidyStatusInactive tidyStatusState = iota - tidyStatusStarted = iota - tidyStatusFinished = iota - tidyStatusError = iota - tidyStatusCancelling = iota - tidyStatusCancelled = iota -) - -type tidyStatus struct { - // Parameters used to initiate the operation - safetyBuffer int - issuerSafetyBuffer int - revQueueSafetyBuffer int - acmeAccountSafetyBuffer int - - tidyCertStore bool - tidyRevokedCerts bool - tidyRevokedAssocs bool - tidyExpiredIssuers bool - tidyBackupBundle bool - tidyRevocationQueue bool - tidyCrossRevokedCerts bool - tidyAcme bool - pauseDuration string - - // Status - state tidyStatusState - err error - timeStarted time.Time - timeFinished time.Time - message string - - // These counts use a custom incrementer that grab and release - // a lock prior to reading. 
- certStoreDeletedCount uint - revokedCertDeletedCount uint - missingIssuerCertCount uint - revQueueDeletedCount uint - crossRevokedDeletedCount uint - - acmeAccountsCount uint - acmeAccountsRevokedCount uint - acmeAccountsDeletedCount uint - acmeOrdersDeletedCount uint -} - type tidyConfig struct { - // AutoTidy config - Enabled bool `json:"enabled"` - Interval time.Duration `json:"interval_duration"` - - // Tidy Operations - CertStore bool `json:"tidy_cert_store"` - RevokedCerts bool `json:"tidy_revoked_certs"` - IssuerAssocs bool `json:"tidy_revoked_cert_issuer_associations"` - ExpiredIssuers bool `json:"tidy_expired_issuers"` - BackupBundle bool `json:"tidy_move_legacy_ca_bundle"` - RevocationQueue bool `json:"tidy_revocation_queue"` - CrossRevokedCerts bool `json:"tidy_cross_cluster_revoked_certs"` - TidyAcme bool `json:"tidy_acme"` - - // Safety Buffers - SafetyBuffer time.Duration `json:"safety_buffer"` - IssuerSafetyBuffer time.Duration `json:"issuer_safety_buffer"` - QueueSafetyBuffer time.Duration `json:"revocation_queue_safety_buffer"` - AcmeAccountSafetyBuffer time.Duration `json:"acme_account_safety_buffer"` - PauseDuration time.Duration `json:"pause_duration"` - - // Metrics. 
- MaintainCount bool `json:"maintain_stored_certificate_counts"` - PublishMetrics bool `json:"publish_stored_certificate_count_metrics"` -} - -func (tc *tidyConfig) IsAnyTidyEnabled() bool { - return tc.CertStore || tc.RevokedCerts || tc.IssuerAssocs || tc.ExpiredIssuers || tc.BackupBundle || tc.TidyAcme || tc.CrossRevokedCerts || tc.RevocationQueue -} - -func (tc *tidyConfig) AnyTidyConfig() string { - return "tidy_cert_store / tidy_revoked_certs / tidy_revoked_cert_issuer_associations / tidy_expired_issuers / tidy_move_legacy_ca_bundle / tidy_revocation_queue / tidy_cross_cluster_revoked_certs / tidy_acme" + Enabled bool `json:"enabled"` + Interval time.Duration `json:"interval_duration"` + CertStore bool `json:"tidy_cert_store"` + RevokedCerts bool `json:"tidy_revoked_certs"` + IssuerAssocs bool `json:"tidy_revoked_cert_issuer_associations"` + SafetyBuffer time.Duration `json:"safety_buffer"` + PauseDuration time.Duration `json:"pause_duration"` } var defaultTidyConfig = tidyConfig{ - Enabled: false, - Interval: 12 * time.Hour, - CertStore: false, - RevokedCerts: false, - IssuerAssocs: false, - ExpiredIssuers: false, - BackupBundle: false, - TidyAcme: false, - SafetyBuffer: 72 * time.Hour, - IssuerSafetyBuffer: 365 * 24 * time.Hour, - AcmeAccountSafetyBuffer: 30 * 24 * time.Hour, - PauseDuration: 0 * time.Second, - MaintainCount: false, - PublishMetrics: false, - RevocationQueue: false, - QueueSafetyBuffer: 48 * time.Hour, - CrossRevokedCerts: false, + Enabled: false, + Interval: 12 * time.Hour, + CertStore: false, + RevokedCerts: false, + IssuerAssocs: false, + SafetyBuffer: 72 * time.Hour, + PauseDuration: 0 * time.Second, } func pathTidy(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "tidy", - }, - - Fields: addTidyFields(map[string]*framework.FieldSchema{}), + Fields: addTidyFields(map[string]*framework.FieldSchema{}), Operations: 
map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathTidyWrite, - Responses: map[int][]framework.Response{ - http.StatusAccepted: {{ - Description: "Accepted", - Fields: map[string]*framework.FieldSchema{}, - }}, - }, + Callback: b.pathTidyWrite, ForwardPerformanceStandby: true, }, }, @@ -156,171 +56,9 @@ func pathTidy(b *backend) *framework.Path { func pathTidyCancel(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy-cancel$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "tidy", - OperationSuffix: "cancel", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathTidyCancelWrite, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer time duration`, - Required: false, - }, - "issuer_safety_buffer": { - Type: framework.TypeInt, - Description: `Issuer safety buffer`, - Required: false, - }, - "revocation_queue_safety_buffer": { - Type: framework.TypeInt, - Description: `Revocation queue safety buffer`, - Required: true, - }, - "tidy_cert_store": { - Type: framework.TypeBool, - Description: `Tidy certificate store`, - Required: false, - }, - "tidy_revoked_certs": { - Type: framework.TypeBool, - Description: `Tidy revoked certificates`, - Required: false, - }, - "tidy_revoked_cert_issuer_associations": { - Type: framework.TypeBool, - Description: `Tidy revoked certificate issuer associations`, - Required: false, - }, - "tidy_acme": { - Type: framework.TypeBool, - Description: `Tidy Unused Acme Accounts, and Orders`, - Required: false, - }, - "acme_account_safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer after creation after which accounts lacking orders are revoked`, - 
Required: false, - }, - "tidy_expired_issuers": { - Type: framework.TypeBool, - Description: `Tidy expired issuers`, - Required: false, - }, - "pause_duration": { - Type: framework.TypeString, - Description: `Duration to pause between tidying certificates`, - Required: false, - }, - "state": { - Type: framework.TypeString, - Description: `One of Inactive, Running, Finished, or Error`, - Required: false, - }, - "error": { - Type: framework.TypeString, - Description: `The error message`, - Required: false, - }, - "time_started": { - Type: framework.TypeString, - Description: `Time the operation started`, - Required: false, - }, - "time_finished": { - Type: framework.TypeString, - Description: `Time the operation finished`, - Required: false, - }, - "last_auto_tidy_finished": { - Type: framework.TypeString, - Description: `Time the last auto-tidy operation finished`, - Required: true, - }, - "message": { - Type: framework.TypeString, - Description: `Message of the operation`, - Required: false, - }, - "cert_store_deleted_count": { - Type: framework.TypeInt, - Description: `The number of certificate storage entries deleted`, - Required: false, - }, - "revoked_cert_deleted_count": { - Type: framework.TypeInt, - Description: `The number of revoked certificate entries deleted`, - Required: false, - }, - "current_cert_store_count": { - Type: framework.TypeInt, - Description: `The number of revoked certificate entries deleted`, - Required: false, - }, - "current_revoked_cert_count": { - Type: framework.TypeInt, - Description: `The number of revoked certificate entries deleted`, - Required: false, - }, - "missing_issuer_cert_count": { - Type: framework.TypeInt, - Required: false, - }, - "tidy_move_legacy_ca_bundle": { - Type: framework.TypeBool, - Required: false, - }, - "tidy_cross_cluster_revoked_certs": { - Type: framework.TypeBool, - Required: false, - }, - "tidy_revocation_queue": { - Type: framework.TypeBool, - Required: false, - }, - "revocation_queue_deleted_count": 
{ - Type: framework.TypeInt, - Required: false, - }, - "cross_revoked_cert_deleted_count": { - Type: framework.TypeInt, - Required: false, - }, - "internal_backend_uuid": { - Type: framework.TypeString, - Required: false, - }, - "total_acme_account_count": { - Type: framework.TypeInt, - Description: `Total number of acme accounts iterated over`, - Required: false, - }, - "acme_account_deleted_count": { - Type: framework.TypeInt, - Description: `The number of revoked acme accounts removed`, - Required: false, - }, - "acme_account_revoked_count": { - Type: framework.TypeInt, - Description: `The number of unused acme accounts revoked`, - Required: false, - }, - "acme_orders_deleted_count": { - Type: framework.TypeInt, - Description: `The number of expired, unused acme orders removed`, - Required: false, - }, - }, - }}, - }, + Callback: b.pathTidyCancelWrite, ForwardPerformanceStandby: true, }, }, @@ -332,173 +70,9 @@ func pathTidyCancel(b *backend) *framework.Path { func pathTidyStatus(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy-status$", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "tidy", - OperationSuffix: "status", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathTidyStatusRead, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer time duration`, - Required: true, - }, - "issuer_safety_buffer": { - Type: framework.TypeInt, - Description: `Issuer safety buffer`, - Required: true, - }, - "revocation_queue_safety_buffer": { - Type: framework.TypeInt, - Description: `Revocation queue safety buffer`, - Required: true, - }, - "acme_account_safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer after creation after which accounts 
lacking orders are revoked`, - Required: false, - }, - "tidy_cert_store": { - Type: framework.TypeBool, - Description: `Tidy certificate store`, - Required: true, - }, - "tidy_revoked_certs": { - Type: framework.TypeBool, - Description: `Tidy revoked certificates`, - Required: true, - }, - "tidy_revoked_cert_issuer_associations": { - Type: framework.TypeBool, - Description: `Tidy revoked certificate issuer associations`, - Required: true, - }, - "tidy_expired_issuers": { - Type: framework.TypeBool, - Description: `Tidy expired issuers`, - Required: true, - }, - "tidy_cross_cluster_revoked_certs": { - Type: framework.TypeString, - Description: ``, - Required: false, - }, - "tidy_acme": { - Type: framework.TypeBool, - Description: `Tidy Unused Acme Accounts, and Orders`, - Required: true, - }, - "pause_duration": { - Type: framework.TypeString, - Description: `Duration to pause between tidying certificates`, - Required: true, - }, - "state": { - Type: framework.TypeString, - Description: `One of Inactive, Running, Finished, or Error`, - Required: true, - }, - "error": { - Type: framework.TypeString, - Description: `The error message`, - Required: true, - }, - "time_started": { - Type: framework.TypeString, - Description: `Time the operation started`, - Required: true, - }, - "time_finished": { - Type: framework.TypeString, - Description: `Time the operation finished`, - Required: false, - }, - "last_auto_tidy_finished": { - Type: framework.TypeString, - Description: `Time the last auto-tidy operation finished`, - Required: true, - }, - "message": { - Type: framework.TypeString, - Description: `Message of the operation`, - Required: true, - }, - "cert_store_deleted_count": { - Type: framework.TypeInt, - Description: `The number of certificate storage entries deleted`, - Required: true, - }, - "revoked_cert_deleted_count": { - Type: framework.TypeInt, - Description: `The number of revoked certificate entries deleted`, - Required: true, - }, - 
"current_cert_store_count": { - Type: framework.TypeInt, - Description: `The number of revoked certificate entries deleted`, - Required: true, - }, - "cross_revoked_cert_deleted_count": { - Type: framework.TypeInt, - Description: ``, - Required: true, - }, - "current_revoked_cert_count": { - Type: framework.TypeInt, - Description: `The number of revoked certificate entries deleted`, - Required: true, - }, - "revocation_queue_deleted_count": { - Type: framework.TypeInt, - Required: true, - }, - "tidy_move_legacy_ca_bundle": { - Type: framework.TypeBool, - Required: true, - }, - "tidy_revocation_queue": { - Type: framework.TypeBool, - Required: true, - }, - "missing_issuer_cert_count": { - Type: framework.TypeInt, - Required: true, - }, - "internal_backend_uuid": { - Type: framework.TypeString, - Required: true, - }, - "total_acme_account_count": { - Type: framework.TypeInt, - Description: `Total number of acme accounts iterated over`, - Required: false, - }, - "acme_account_deleted_count": { - Type: framework.TypeInt, - Description: `The number of revoked acme accounts removed`, - Required: false, - }, - "acme_account_revoked_count": { - Type: framework.TypeInt, - Description: `The number of unused acme accounts revoked`, - Required: false, - }, - "acme_orders_deleted_count": { - Type: framework.TypeInt, - Description: `The number of expired, unused acme orders removed`, - Required: false, - }, - }, - }}, - }, + Callback: b.pathTidyStatusRead, ForwardPerformanceStandby: true, }, }, @@ -510,9 +84,6 @@ func pathTidyStatus(b *backend) *framework.Path { func pathConfigAutoTidy(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/auto-tidy", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - }, Fields: addTidyFields(map[string]*framework.FieldSchema{ "enabled": { Type: framework.TypeBool, @@ -523,207 +94,13 @@ func pathConfigAutoTidy(b *backend) *framework.Path { Description: `Interval at which to run an auto-tidy 
operation. This is the time between tidy invocations (after one finishes to the start of the next). Running a manual tidy will reset this duration.`, Default: int(defaultTidyConfig.Interval / time.Second), // TypeDurationSecond currently requires the default to be an int. }, - "maintain_stored_certificate_counts": { - Type: framework.TypeBool, - Description: `This configures whether stored certificates -are counted upon initialization of the backend, and whether during -normal operation, a running count of certificates stored is maintained.`, - Default: false, - }, - "publish_stored_certificate_count_metrics": { - Type: framework.TypeBool, - Description: `This configures whether the stored certificate -count is published to the metrics consumer. It does not affect if the -stored certificate count is maintained, and if maintained, it will be -available on the tidy-status endpoint.`, - Default: false, - }, }), Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigAutoTidyRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "auto-tidy-configuration", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "enabled": { - Type: framework.TypeBool, - Description: `Specifies whether automatic tidy is enabled or not`, - Required: true, - }, - "interval_duration": { - Type: framework.TypeInt, - Description: `Specifies the duration between automatic tidy operation`, - Required: true, - }, - "tidy_cert_store": { - Type: framework.TypeBool, - Description: `Specifies whether to tidy up the certificate store`, - Required: true, - }, - "tidy_revoked_certs": { - Type: framework.TypeBool, - Description: `Specifies whether to remove all invalid and expired certificates from storage`, - Required: true, - }, - "tidy_revoked_cert_issuer_associations": { - Type: framework.TypeBool, - Description: `Specifies 
whether to associate revoked certificates with their corresponding issuers`, - Required: true, - }, - "tidy_expired_issuers": { - Type: framework.TypeBool, - Description: `Specifies whether tidy expired issuers`, - Required: true, - }, - "tidy_acme": { - Type: framework.TypeBool, - Description: `Tidy Unused Acme Accounts, and Orders`, - Required: true, - }, - "safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer time duration`, - Required: true, - }, - "issuer_safety_buffer": { - Type: framework.TypeInt, - Description: `Issuer safety buffer`, - Required: true, - }, - "acme_account_safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer after creation after which accounts lacking orders are revoked`, - Required: false, - }, - "pause_duration": { - Type: framework.TypeString, - Description: `Duration to pause between tidying certificates`, - Required: true, - }, - "tidy_move_legacy_ca_bundle": { - Type: framework.TypeBool, - Required: true, - }, - "tidy_cross_cluster_revoked_certs": { - Type: framework.TypeBool, - Required: true, - }, - "tidy_revocation_queue": { - Type: framework.TypeBool, - Required: true, - }, - "revocation_queue_safety_buffer": { - Type: framework.TypeDurationSecond, - Required: true, - }, - "publish_stored_certificate_count_metrics": { - Type: framework.TypeBool, - Required: true, - }, - "maintain_stored_certificate_counts": { - Type: framework.TypeBool, - Required: true, - }, - }, - }}, - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigAutoTidyWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "auto-tidy", - }, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "enabled": { - Type: framework.TypeBool, - Description: `Specifies whether automatic tidy is enabled or not`, - Required: true, - }, - "interval_duration": { - Type: framework.TypeInt, - 
Description: `Specifies the duration between automatic tidy operation`, - Required: true, - }, - "tidy_cert_store": { - Type: framework.TypeBool, - Description: `Specifies whether to tidy up the certificate store`, - Required: true, - }, - "tidy_revoked_certs": { - Type: framework.TypeBool, - Description: `Specifies whether to remove all invalid and expired certificates from storage`, - Required: true, - }, - "tidy_revoked_cert_issuer_associations": { - Type: framework.TypeBool, - Description: `Specifies whether to associate revoked certificates with their corresponding issuers`, - Required: true, - }, - "tidy_expired_issuers": { - Type: framework.TypeBool, - Description: `Specifies whether tidy expired issuers`, - Required: true, - }, - "tidy_acme": { - Type: framework.TypeBool, - Description: `Tidy Unused Acme Accounts, and Orders`, - Required: true, - }, - "safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer time duration`, - Required: true, - }, - "issuer_safety_buffer": { - Type: framework.TypeInt, - Description: `Issuer safety buffer`, - Required: true, - }, - "acme_account_safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer after creation after which accounts lacking orders are revoked`, - Required: true, - }, - "pause_duration": { - Type: framework.TypeString, - Description: `Duration to pause between tidying certificates`, - Required: true, - }, - "tidy_cross_cluster_revoked_certs": { - Type: framework.TypeBool, - Required: true, - }, - "tidy_revocation_queue": { - Type: framework.TypeBool, - Required: true, - }, - "tidy_move_legacy_ca_bundle": { - Type: framework.TypeBool, - Required: true, - }, - "revocation_queue_safety_buffer": { - Type: framework.TypeDurationSecond, - Required: true, - }, - "publish_stored_certificate_count_metrics": { - Type: framework.TypeBool, - Required: true, - }, - "maintain_stored_certificate_counts": { - Type: framework.TypeBool, - Required: true, - }, - }, - }}, - }, // Read more 
about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -739,33 +116,13 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr tidyCertStore := d.Get("tidy_cert_store").(bool) tidyRevokedCerts := d.Get("tidy_revoked_certs").(bool) || d.Get("tidy_revocation_list").(bool) tidyRevokedAssocs := d.Get("tidy_revoked_cert_issuer_associations").(bool) - tidyExpiredIssuers := d.Get("tidy_expired_issuers").(bool) - tidyBackupBundle := d.Get("tidy_move_legacy_ca_bundle").(bool) - issuerSafetyBuffer := d.Get("issuer_safety_buffer").(int) pauseDurationStr := d.Get("pause_duration").(string) pauseDuration := 0 * time.Second - tidyRevocationQueue := d.Get("tidy_revocation_queue").(bool) - queueSafetyBuffer := d.Get("revocation_queue_safety_buffer").(int) - tidyCrossRevokedCerts := d.Get("tidy_cross_cluster_revoked_certs").(bool) - tidyAcme := d.Get("tidy_acme").(bool) - acmeAccountSafetyBuffer := d.Get("acme_account_safety_buffer").(int) if safetyBuffer < 1 { return logical.ErrorResponse("safety_buffer must be greater than zero"), nil } - if issuerSafetyBuffer < 1 { - return logical.ErrorResponse("issuer_safety_buffer must be greater than zero"), nil - } - - if queueSafetyBuffer < 1 { - return logical.ErrorResponse("revocation_queue_safety_buffer must be greater than zero"), nil - } - - if acmeAccountSafetyBuffer < 1 { - return logical.ErrorResponse("acme_account_safety_buffer must be greater than zero"), nil - } - if pauseDurationStr != "" { var err error pauseDuration, err = time.ParseDuration(pauseDurationStr) @@ -779,27 +136,16 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr } bufferDuration := time.Duration(safetyBuffer) * time.Second - issuerBufferDuration := time.Duration(issuerSafetyBuffer) * time.Second - queueSafetyBufferDuration := time.Duration(queueSafetyBuffer) * time.Second - acmeAccountSafetyBufferDuration := 
time.Duration(acmeAccountSafetyBuffer) * time.Second // Manual run with constructed configuration. config := &tidyConfig{ - Enabled: true, - Interval: 0 * time.Second, - CertStore: tidyCertStore, - RevokedCerts: tidyRevokedCerts, - IssuerAssocs: tidyRevokedAssocs, - ExpiredIssuers: tidyExpiredIssuers, - BackupBundle: tidyBackupBundle, - SafetyBuffer: bufferDuration, - IssuerSafetyBuffer: issuerBufferDuration, - PauseDuration: pauseDuration, - RevocationQueue: tidyRevocationQueue, - QueueSafetyBuffer: queueSafetyBufferDuration, - CrossRevokedCerts: tidyCrossRevokedCerts, - TidyAcme: tidyAcme, - AcmeAccountSafetyBuffer: acmeAccountSafetyBufferDuration, + Enabled: true, + Interval: 0 * time.Second, + CertStore: tidyCertStore, + RevokedCerts: tidyRevokedCerts, + IssuerAssocs: tidyRevokedAssocs, + SafetyBuffer: bufferDuration, + PauseDuration: pauseDuration, } if !atomic.CompareAndSwapUint32(b.tidyCASGuard, 0, 1) { @@ -824,20 +170,12 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr b.startTidyOperation(req, config) resp := &logical.Response{} - if !config.IsAnyTidyEnabled() { - resp.AddWarning("Manual tidy requested but no tidy operations were set. Enable at least one tidy operation to be run (" + config.AnyTidyConfig() + ").") + if !tidyCertStore && !tidyRevokedCerts && !tidyRevokedAssocs { + resp.AddWarning("No targets to tidy; specify tidy_cert_store=true or tidy_revoked_certs=true or tidy_revoked_cert_issuer_associations=true to start a tidy operation.") } else { resp.AddWarning("Tidy operation successfully started. 
Any information from the operation will be printed to Vault's server logs.") } - if tidyRevocationQueue || tidyCrossRevokedCerts { - isNotPerfPrimary := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) - if isNotPerfPrimary { - resp.AddWarning("tidy_revocation_queue=true and tidy_cross_cluster_revoked_certs=true can only be set on the active node of the primary cluster unless a local mount is used; this option has been ignored.") - } - } - return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) } @@ -871,61 +209,6 @@ func (b *backend) startTidyOperation(req *logical.Request, config *tidyConfig) { } } - // Check for cancel before continuing. - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - if config.ExpiredIssuers { - if err := b.doTidyExpiredIssuers(ctx, req, logger, config); err != nil { - return err - } - } - - // Check for cancel before continuing. - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - if config.BackupBundle { - if err := b.doTidyMoveCABundle(ctx, req, logger, config); err != nil { - return err - } - } - - // Check for cancel before continuing. - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - if config.RevocationQueue { - if err := b.doTidyRevocationQueue(ctx, req, logger, config); err != nil { - return err - } - } - - // Check for cancel before continuing. - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - if config.CrossRevokedCerts { - if err := b.doTidyCrossRevocationStore(ctx, req, logger, config); err != nil { - return err - } - } - - // Check for cancel before continuing. 
- if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - if config.TidyAcme { - if err := b.doTidyAcme(ctx, req, logger, config); err != nil { - return err - } - } - return nil } @@ -995,7 +278,7 @@ func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, log return fmt.Errorf("unable to parse stored certificate with serial %q: %w", serial, err) } - if time.Since(cert.NotAfter) > config.SafetyBuffer { + if time.Now().After(cert.NotAfter.Add(config.SafetyBuffer)) { if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { return fmt.Errorf("error deleting serial %q from storage: %w", serial, err) } @@ -1103,7 +386,7 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques // past its NotAfter value. This is because we use the // information on revoked/ to build the CRL and the // information on certs/ for lookup. - if time.Since(revokedCert.NotAfter) > config.SafetyBuffer { + if time.Now().After(revokedCert.NotAfter.Add(config.SafetyBuffer)) { if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { return fmt.Errorf("error deleting serial %q from revoked list: %w", serial, err) } @@ -1121,12 +404,12 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques if storeCert { revokedEntry, err = logical.StorageEntryJSON("revoked/"+serial, revInfo) if err != nil { - return fmt.Errorf("error building entry to persist changes to serial %v from revoked list: %w", serial, err) + return fmt.Errorf("error building entry to persist changes to serial %v from revoked list: %v", serial, err) } err = req.Storage.Put(ctx, revokedEntry) if err != nil { - return fmt.Errorf("error persisting changes to serial %v from revoked list: %w", serial, err) + return fmt.Errorf("error persisting changes to serial %v from revoked list: %v", serial, err) } } } @@ -1148,466 +431,9 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques } if 
!config.AutoRebuild { - warnings, err := b.crlBuilder.rebuild(sc, false) - if err != nil { + if err := b.crlBuilder.rebuild(ctx, b, req, false); err != nil { return err } - if len(warnings) > 0 { - msg := "During rebuild of CRL for tidy, got the following warnings:" - for index, warning := range warnings { - msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) - } - b.Logger().Warn(msg) - } - } - } - - return nil -} - -func (b *backend) doTidyExpiredIssuers(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - // We do not support cancelling within the expired issuers operation. - // Any cancellation will occur before or after this operation. - - if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { - b.Logger().Debug("skipping expired issuer tidy as we're not on the primary or secondary with a local mount") - return nil - } - - // Short-circuit to avoid having to deal with the legacy mounts. While we - // could handle this case and remove these issuers, its somewhat - // unexpected behavior and we'd prefer to finish the migration first. - if b.useLegacyBundleCaStorage() { - return nil - } - - b.issuersLock.Lock() - defer b.issuersLock.Unlock() - - // Fetch and parse our issuers so we have their expiration date. - sc := b.makeStorageContext(ctx, req.Storage) - issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc) - if err != nil { - return err - } - - // Fetch the issuer config to find the default; we don't want to remove - // the current active issuer automatically. - iConfig, err := sc.getIssuersConfig() - if err != nil { - return err - } - - // We want certificates which have expired before this date by a given - // safety buffer. 
- rebuildChainsAndCRL := false - - for issuer, cert := range issuerIDCertMap { - if time.Since(cert.NotAfter) <= config.IssuerSafetyBuffer { - continue - } - - entry, err := sc.fetchIssuerById(issuer) - if err != nil { - return nil - } - - // This issuer's certificate has expired. We explicitly persist the - // key, but log both the certificate and the keyId to the - // informational logs so an admin can recover the removed cert if - // necessary or remove the key (and know which cert it belonged to), - // if desired. - msg := "[Tidy on mount: %v] Issuer %v has expired by %v and is being removed." - idAndName := fmt.Sprintf("[id:%v/name:%v]", entry.ID, entry.Name) - msg = fmt.Sprintf(msg, b.backendUUID, idAndName, config.IssuerSafetyBuffer) - - // Before we log, check if we're the default. While this is late, and - // after we read it from storage, we have more info here to tell the - // user that their default has expired AND has passed the safety - // buffer. - if iConfig.DefaultIssuerId == issuer { - msg = "[Tidy on mount: %v] Issuer %v has expired and would be removed via tidy, but won't be, as it is currently the default issuer." - msg = fmt.Sprintf(msg, b.backendUUID, idAndName) - b.Logger().Warn(msg) - continue - } - - // Log the above message.. - b.Logger().Info(msg, "serial_number", entry.SerialNumber, "key_id", entry.KeyID, "certificate", entry.Certificate) - - wasDefault, err := sc.deleteIssuer(issuer) - if err != nil { - b.Logger().Error(fmt.Sprintf("failed to remove %v: %v", idAndName, err)) - return err - } - if wasDefault { - b.Logger().Warn(fmt.Sprintf("expired issuer %v was default; it is strongly encouraged to choose a new default issuer for backwards compatibility", idAndName)) - } - - rebuildChainsAndCRL = true - } - - if rebuildChainsAndCRL { - // When issuers are removed, there's a chance chains change as a - // result; remove them. 
- if err := sc.rebuildIssuersChains(nil); err != nil { - return err - } - - // Removal of issuers is generally a good reason to rebuild the CRL, - // even if auto-rebuild is enabled. - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() - - warnings, err := b.crlBuilder.rebuild(sc, false) - if err != nil { - return err - } - if len(warnings) > 0 { - msg := "During rebuild of CRL for tidy, got the following warnings:" - for index, warning := range warnings { - msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) - } - b.Logger().Warn(msg) - } - } - - return nil -} - -func (b *backend) doTidyMoveCABundle(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - // We do not support cancelling within this operation; any cancel will - // occur before or after this operation. - - if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { - b.Logger().Debug("skipping moving the legacy CA bundle as we're not on the primary or secondary with a local mount") - return nil - } - - // Short-circuit to avoid moving the legacy bundle from under a legacy - // mount. - if b.useLegacyBundleCaStorage() { - return nil - } - - // If we've already run, exit. 
- _, bundle, err := getLegacyCertBundle(ctx, req.Storage) - if err != nil { - return fmt.Errorf("failed to fetch the legacy CA bundle: %w", err) - } - - if bundle == nil { - b.Logger().Debug("No legacy CA bundle available; nothing to do.") - return nil - } - - log, err := getLegacyBundleMigrationLog(ctx, req.Storage) - if err != nil { - return fmt.Errorf("failed to fetch the legacy bundle migration log: %w", err) - } - - if log == nil { - return fmt.Errorf("refusing to tidy with an empty legacy migration log but present CA bundle: %w", err) - } - - if time.Since(log.Created) <= config.IssuerSafetyBuffer { - b.Logger().Debug("Migration was created too recently to remove the legacy bundle; refusing to move legacy CA bundle to backup location.") - return nil - } - - // Do the write before the delete. - entry, err := logical.StorageEntryJSON(legacyCertBundleBackupPath, bundle) - if err != nil { - return fmt.Errorf("failed to create new backup storage entry: %w", err) - } - - err = req.Storage.Put(ctx, entry) - if err != nil { - return fmt.Errorf("failed to write new backup legacy CA bundle: %w", err) - } - - err = req.Storage.Delete(ctx, legacyCertBundlePath) - if err != nil { - return fmt.Errorf("failed to remove old legacy CA bundle path: %w", err) - } - - b.Logger().Info("legacy CA bundle successfully moved to backup location") - return nil -} - -func (b *backend) doTidyRevocationQueue(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { - b.Logger().Debug("skipping cross-cluster revocation queue tidy as we're not on the primary or secondary with a local mount") - return nil - } - - sc := b.makeStorageContext(ctx, req.Storage) - clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix) - if err 
!= nil { - return fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err) - } - - // Grab locks as we're potentially modifying revocation-related storage. - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() - - for cIndex, cluster := range clusters { - if cluster[len(cluster)-1] == '/' { - cluster = cluster[0 : len(cluster)-1] - } - - cPath := crossRevocationPrefix + cluster + "/" - serials, err := sc.Storage.List(sc.Context, cPath) - if err != nil { - return fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err) - } - - for _, serial := range serials { - // Check for cancellation. - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - // Check for pause duration to reduce resource consumption. - if config.PauseDuration > (0 * time.Second) { - b.revokeStorageLock.Unlock() - time.Sleep(config.PauseDuration) - b.revokeStorageLock.Lock() - } - - // Confirmation entries _should_ be handled by this cluster's - // processRevocationQueue(...) invocation; if not, when the plugin - // reloads, maybeGatherQueueForFirstProcess(...) will remove all - // stale confirmation requests. However, we don't want to force an - // operator to reload their in-use plugin, so allow tidy to also - // clean up confirmation values without reloading. - if serial[len(serial)-1] == '/' { - // Check if we have a confirmed entry. - confirmedPath := cPath + serial + "confirmed" - removalEntry, err := sc.Storage.Get(sc.Context, confirmedPath) - if err != nil { - return fmt.Errorf("error reading revocation confirmation (%v) during tidy: %w", confirmedPath, err) - } - if removalEntry == nil { - continue - } - - // Remove potential revocation requests from all clusters. 
- for _, subCluster := range clusters { - if subCluster[len(subCluster)-1] == '/' { - subCluster = subCluster[0 : len(subCluster)-1] - } - - reqPath := subCluster + "/" + serial[0:len(serial)-1] - if err := sc.Storage.Delete(sc.Context, reqPath); err != nil { - return fmt.Errorf("failed to remove confirmed revocation request on candidate cluster (%v): %w", reqPath, err) - } - } - - // Then delete the confirmation. - if err := sc.Storage.Delete(sc.Context, confirmedPath); err != nil { - return fmt.Errorf("failed to remove confirmed revocation confirmation (%v): %w", confirmedPath, err) - } - - // No need to handle a revocation request at this path: it can't - // still exist on this cluster after we deleted it above. - continue - } - - ePath := cPath + serial - entry, err := sc.Storage.Get(sc.Context, ePath) - if err != nil { - return fmt.Errorf("error reading revocation request (%v) to tidy: %w", ePath, err) - } - if entry == nil || entry.Value == nil { - continue - } - - var revRequest revocationRequest - if err := entry.DecodeJSON(&revRequest); err != nil { - return fmt.Errorf("error reading revocation request (%v) to tidy: %w", ePath, err) - } - - if time.Since(revRequest.RequestedAt) <= config.QueueSafetyBuffer { - continue - } - - // Safe to remove this entry. - if err := sc.Storage.Delete(sc.Context, ePath); err != nil { - return fmt.Errorf("error deleting revocation request (%v): %w", ePath, err) - } - - // Assumption: there should never be a need to remove this from - // the processing queue on this node. We're on the active primary, - // so our writes don't cause invalidations. This means we'd have - // to have slated it for deletion very quickly after it'd been - // sent (i.e., inside of the 1-minute boundary that periodicFunc - // executes at). 
While this is possible, because we grab the - // revocationStorageLock above, we can't execute interleaved - // with that periodicFunc, so the periodicFunc would've had to - // finished before we actually did this deletion (or it wouldn't - // have ignored this serial because our deletion would've - // happened prior to it reading the storage entry). Thus we should - // be safe to ignore the revocation queue removal here. - b.tidyStatusIncRevQueueCount() - } - } - - return nil -} - -func (b *backend) doTidyCrossRevocationStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { - b.Logger().Debug("skipping cross-cluster revoked certificate store tidy as we're not on the primary or secondary with a local mount") - return nil - } - - sc := b.makeStorageContext(ctx, req.Storage) - clusters, err := sc.Storage.List(sc.Context, unifiedRevocationReadPathPrefix) - if err != nil { - return fmt.Errorf("failed to list cross-cluster revoked certificate store participating clusters: %w", err) - } - - // Grab locks as we're potentially modifying revocation-related storage. - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() - - for cIndex, cluster := range clusters { - if cluster[len(cluster)-1] == '/' { - cluster = cluster[0 : len(cluster)-1] - } - - cPath := unifiedRevocationReadPathPrefix + cluster + "/" - serials, err := sc.Storage.List(sc.Context, cPath) - if err != nil { - return fmt.Errorf("failed to list cross-cluster revoked certificate store entries for cluster %v (%v): %w", cluster, cIndex, err) - } - - for _, serial := range serials { - // Check for cancellation. 
- if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - // Check for pause duration to reduce resource consumption. - if config.PauseDuration > (0 * time.Second) { - b.revokeStorageLock.Unlock() - time.Sleep(config.PauseDuration) - b.revokeStorageLock.Lock() - } - - ePath := cPath + serial - entry, err := sc.Storage.Get(sc.Context, ePath) - if err != nil { - return fmt.Errorf("error reading cross-cluster revocation entry (%v) to tidy: %w", ePath, err) - } - if entry == nil || entry.Value == nil { - continue - } - - var details unifiedRevocationEntry - if err := entry.DecodeJSON(&details); err != nil { - return fmt.Errorf("error decoding cross-cluster revocation entry (%v) to tidy: %w", ePath, err) - } - - if time.Since(details.CertExpiration) <= config.SafetyBuffer { - continue - } - - // Safe to remove this entry. - if err := sc.Storage.Delete(sc.Context, ePath); err != nil { - return fmt.Errorf("error deleting revocation request (%v): %w", ePath, err) - } - - b.tidyStatusIncCrossRevCertCount() - } - } - - return nil -} - -func (b *backend) doTidyAcme(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - b.acmeAccountLock.Lock() - defer b.acmeAccountLock.Unlock() - - sc := b.makeStorageContext(ctx, req.Storage) - thumbprints, err := sc.Storage.List(ctx, acmeThumbprintPrefix) - if err != nil { - return err - } - - b.tidyStatusLock.Lock() - b.tidyStatus.acmeAccountsCount = uint(len(thumbprints)) - b.tidyStatusLock.Unlock() - - baseUrl, _, err := getAcmeBaseUrl(sc, req) - if err != nil { - return err - } - - acmeCtx := &acmeContext{ - baseUrl: baseUrl, - sc: sc, - } - - for _, thumbprint := range thumbprints { - err := b.tidyAcmeAccountByThumbprint(b.acmeState, acmeCtx, thumbprint, config.SafetyBuffer, config.AcmeAccountSafetyBuffer) - if err != nil { - logger.Warn("error tidying account %v: %v", thumbprint, err.Error()) - } - - // Check for cancel before continuing. 
- if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - // Check for pause duration to reduce resource consumption. - if config.PauseDuration > (0 * time.Second) { - b.acmeAccountLock.Unlock() // Correct the Lock - time.Sleep(config.PauseDuration) - b.acmeAccountLock.Lock() - } - - } - - // Clean up any unused EAB - eabIds, err := b.acmeState.ListEabIds(sc) - if err != nil { - return fmt.Errorf("failed listing EAB ids: %w", err) - } - - for _, eabId := range eabIds { - eab, err := b.acmeState.LoadEab(sc, eabId) - if err != nil { - if errors.Is(err, ErrStorageItemNotFound) { - // We don't need to worry about a consumed EAB - continue - } - return err - } - - eabExpiration := eab.CreatedOn.Add(config.AcmeAccountSafetyBuffer) - if time.Now().After(eabExpiration) { - _, err := b.acmeState.DeleteEab(sc, eabId) - if err != nil { - return fmt.Errorf("failed to tidy eab %s: %w", eabId, err) - } - } - - // Check for cancel before continuing. - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - // Check for pause duration to reduce resource consumption. 
- if config.PauseDuration > (0 * time.Second) { - b.acmeAccountLock.Unlock() // Correct the Lock - time.Sleep(config.PauseDuration) - b.acmeAccountLock.Lock() } } @@ -1644,15 +470,9 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f resp := &logical.Response{ Data: map[string]interface{}{ "safety_buffer": nil, - "issuer_safety_buffer": nil, "tidy_cert_store": nil, "tidy_revoked_certs": nil, "tidy_revoked_cert_issuer_associations": nil, - "tidy_expired_issuers": nil, - "tidy_move_legacy_ca_bundle": nil, - "tidy_revocation_queue": nil, - "tidy_cross_cluster_revoked_certs": nil, - "tidy_acme": nil, "pause_duration": nil, "state": "Inactive", "error": nil, @@ -1664,60 +484,23 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f "missing_issuer_cert_count": nil, "current_cert_store_count": nil, "current_revoked_cert_count": nil, - "internal_backend_uuid": nil, - "revocation_queue_deleted_count": nil, - "cross_revoked_cert_deleted_count": nil, - "total_acme_account_count": nil, - "acme_account_deleted_count": nil, - "acme_account_revoked_count": nil, - "acme_orders_deleted_count": nil, - "acme_account_safety_buffer": nil, }, } - resp.Data["internal_backend_uuid"] = b.backendUUID - - if b.certCountEnabled.Load() { - resp.Data["current_cert_store_count"] = b.certCount.Load() - resp.Data["current_revoked_cert_count"] = b.revokedCertCount.Load() - if !b.certsCounted.Load() { - resp.AddWarning("Certificates in storage are still being counted, current counts provided may be " + - "inaccurate") - } - if b.certCountError != "" { - resp.Data["certificate_counting_error"] = b.certCountError - } - } - if b.tidyStatus.state == tidyStatusInactive { return resp, nil } resp.Data["safety_buffer"] = b.tidyStatus.safetyBuffer - resp.Data["issuer_safety_buffer"] = b.tidyStatus.issuerSafetyBuffer resp.Data["tidy_cert_store"] = b.tidyStatus.tidyCertStore resp.Data["tidy_revoked_certs"] = b.tidyStatus.tidyRevokedCerts 
resp.Data["tidy_revoked_cert_issuer_associations"] = b.tidyStatus.tidyRevokedAssocs - resp.Data["tidy_expired_issuers"] = b.tidyStatus.tidyExpiredIssuers - resp.Data["tidy_move_legacy_ca_bundle"] = b.tidyStatus.tidyBackupBundle - resp.Data["tidy_revocation_queue"] = b.tidyStatus.tidyRevocationQueue - resp.Data["tidy_cross_cluster_revoked_certs"] = b.tidyStatus.tidyCrossRevokedCerts - resp.Data["tidy_acme"] = b.tidyStatus.tidyAcme resp.Data["pause_duration"] = b.tidyStatus.pauseDuration resp.Data["time_started"] = b.tidyStatus.timeStarted resp.Data["message"] = b.tidyStatus.message resp.Data["cert_store_deleted_count"] = b.tidyStatus.certStoreDeletedCount resp.Data["revoked_cert_deleted_count"] = b.tidyStatus.revokedCertDeletedCount resp.Data["missing_issuer_cert_count"] = b.tidyStatus.missingIssuerCertCount - resp.Data["revocation_queue_deleted_count"] = b.tidyStatus.revQueueDeletedCount - resp.Data["cross_revoked_cert_deleted_count"] = b.tidyStatus.crossRevokedDeletedCount - resp.Data["revocation_queue_safety_buffer"] = b.tidyStatus.revQueueSafetyBuffer - resp.Data["last_auto_tidy_finished"] = b.lastTidy - resp.Data["total_acme_account_count"] = b.tidyStatus.acmeAccountsCount - resp.Data["acme_account_deleted_count"] = b.tidyStatus.acmeAccountsDeletedCount - resp.Data["acme_account_revoked_count"] = b.tidyStatus.acmeAccountsRevokedCount - resp.Data["acme_orders_deleted_count"] = b.tidyStatus.acmeOrdersDeletedCount - resp.Data["acme_account_safety_buffer"] = b.tidyStatus.acmeAccountSafetyBuffer switch b.tidyStatus.state { case tidyStatusStarted: @@ -1739,6 +522,14 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f resp.Data["time_finished"] = b.tidyStatus.timeFinished } + resp.Data["current_cert_store_count"] = atomic.LoadUint32(b.certCount) + resp.Data["current_revoked_cert_count"] = atomic.LoadUint32(b.revokedCertCount) + + if !b.certsCounted.Load() { + resp.AddWarning("Certificates in storage are still being counted, current 
counts provided may be " + + "inaccurate") + } + return resp, nil } @@ -1750,7 +541,15 @@ func (b *backend) pathConfigAutoTidyRead(ctx context.Context, req *logical.Reque } return &logical.Response{ - Data: getTidyConfigData(*config), + Data: map[string]interface{}{ + "enabled": config.Enabled, + "interval_duration": int(config.Interval / time.Second), + "tidy_cert_store": config.CertStore, + "tidy_revoked_certs": config.RevokedCerts, + "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, + "safety_buffer": int(config.SafetyBuffer / time.Second), + "pause_duration": config.PauseDuration.String(), + }, }, nil } @@ -1787,7 +586,7 @@ func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Requ if safetyBufferRaw, ok := d.GetOk("safety_buffer"); ok { config.SafetyBuffer = time.Duration(safetyBufferRaw.(int)) * time.Second if config.SafetyBuffer < 1*time.Second { - return logical.ErrorResponse(fmt.Sprintf("given safety_buffer must be at least one second; got: %v", safetyBufferRaw)), nil + return logical.ErrorResponse(fmt.Sprintf("given safety_buffer must be greater than zero seconds; got: %v", safetyBufferRaw)), nil } } @@ -1802,63 +601,11 @@ func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Requ } } - if expiredIssuers, ok := d.GetOk("tidy_expired_issuers"); ok { - config.ExpiredIssuers = expiredIssuers.(bool) - } - - if issuerSafetyBufferRaw, ok := d.GetOk("issuer_safety_buffer"); ok { - config.IssuerSafetyBuffer = time.Duration(issuerSafetyBufferRaw.(int)) * time.Second - if config.IssuerSafetyBuffer < 1*time.Second { - return logical.ErrorResponse(fmt.Sprintf("given safety_buffer must be at least one second; got: %v", issuerSafetyBufferRaw)), nil - } - } - - if backupBundle, ok := d.GetOk("tidy_move_legacy_ca_bundle"); ok { - config.BackupBundle = backupBundle.(bool) - } - - if revocationQueueRaw, ok := d.GetOk("tidy_revocation_queue"); ok { - config.RevocationQueue = revocationQueueRaw.(bool) - } - - if 
queueSafetyBufferRaw, ok := d.GetOk("revocation_queue_safety_buffer"); ok { - config.QueueSafetyBuffer = time.Duration(queueSafetyBufferRaw.(int)) * time.Second - if config.QueueSafetyBuffer < 1*time.Second { - return logical.ErrorResponse(fmt.Sprintf("given revocation_queue_safety_buffer must be at least one second; got: %v", queueSafetyBufferRaw)), nil - } - } - - if crossRevokedRaw, ok := d.GetOk("tidy_cross_cluster_revoked_certs"); ok { - config.CrossRevokedCerts = crossRevokedRaw.(bool) - } - - if tidyAcmeRaw, ok := d.GetOk("tidy_acme"); ok { - config.TidyAcme = tidyAcmeRaw.(bool) - } - - if config.Enabled && !config.IsAnyTidyEnabled() { - return logical.ErrorResponse("Auto-tidy enabled but no tidy operations were requested. Enable at least one tidy operation to be run (" + config.AnyTidyConfig() + ")."), nil - } - - if maintainCountEnabledRaw, ok := d.GetOk("maintain_stored_certificate_counts"); ok { - config.MaintainCount = maintainCountEnabledRaw.(bool) - } - - if runningStorageMetricsEnabledRaw, ok := d.GetOk("publish_stored_certificate_count_metrics"); ok { - config.PublishMetrics = runningStorageMetricsEnabledRaw.(bool) - } - - if config.PublishMetrics && !config.MaintainCount { - return logical.ErrorResponse("Can not publish a running storage metrics count to metrics without first maintaining that count. Enable `maintain_stored_certificate_counts` to enable `publish_stored_certificate_count_metrics`."), nil + if config.Enabled && !(config.CertStore || config.RevokedCerts || config.IssuerAssocs) { + return logical.ErrorResponse("Auto-tidy enabled but no tidy operations were requested. 
Enable at least one tidy operation to be run (tidy_cert_store / tidy_revoked_certs / tidy_revoked_cert_issuer_associations)."), nil } - if err := sc.writeAutoTidyConfig(config); err != nil { - return nil, err - } - - return &logical.Response{ - Data: getTidyConfigData(*config), - }, nil + return nil, sc.writeAutoTidyConfig(config) } func (b *backend) tidyStatusStart(config *tidyConfig) { @@ -1866,19 +613,11 @@ func (b *backend) tidyStatusStart(config *tidyConfig) { defer b.tidyStatusLock.Unlock() b.tidyStatus = &tidyStatus{ - safetyBuffer: int(config.SafetyBuffer / time.Second), - issuerSafetyBuffer: int(config.IssuerSafetyBuffer / time.Second), - revQueueSafetyBuffer: int(config.QueueSafetyBuffer / time.Second), - acmeAccountSafetyBuffer: int(config.AcmeAccountSafetyBuffer / time.Second), - tidyCertStore: config.CertStore, - tidyRevokedCerts: config.RevokedCerts, - tidyRevokedAssocs: config.IssuerAssocs, - tidyExpiredIssuers: config.ExpiredIssuers, - tidyBackupBundle: config.BackupBundle, - tidyRevocationQueue: config.RevocationQueue, - tidyCrossRevokedCerts: config.CrossRevokedCerts, - tidyAcme: config.TidyAcme, - pauseDuration: config.PauseDuration.String(), + safetyBuffer: int(config.SafetyBuffer / time.Second), + tidyCertStore: config.CertStore, + tidyRevokedCerts: config.RevokedCerts, + tidyRevokedAssocs: config.IssuerAssocs, + pauseDuration: config.PauseDuration.String(), state: tidyStatusStarted, timeStarted: time.Now(), @@ -1926,7 +665,7 @@ func (b *backend) tidyStatusIncCertStoreCount() { b.tidyStatus.certStoreDeletedCount++ - b.ifCountEnabledDecrementTotalCertificatesCountReport() + b.decrementTotalCertificatesCountReport() } func (b *backend) tidyStatusIncRevokedCertCount() { @@ -1935,7 +674,7 @@ func (b *backend) tidyStatusIncRevokedCertCount() { b.tidyStatus.revokedCertDeletedCount++ - b.ifCountEnabledDecrementTotalRevokedCertificatesCountReport() + b.decrementTotalRevokedCertificatesCountReport() } func (b *backend) 
tidyStatusIncMissingIssuerCertCount() { @@ -1945,41 +684,6 @@ func (b *backend) tidyStatusIncMissingIssuerCertCount() { b.tidyStatus.missingIssuerCertCount++ } -func (b *backend) tidyStatusIncRevQueueCount() { - b.tidyStatusLock.Lock() - defer b.tidyStatusLock.Unlock() - - b.tidyStatus.revQueueDeletedCount++ -} - -func (b *backend) tidyStatusIncCrossRevCertCount() { - b.tidyStatusLock.Lock() - defer b.tidyStatusLock.Unlock() - - b.tidyStatus.crossRevokedDeletedCount++ -} - -func (b *backend) tidyStatusIncRevAcmeAccountCount() { - b.tidyStatusLock.Lock() - defer b.tidyStatusLock.Unlock() - - b.tidyStatus.acmeAccountsRevokedCount++ -} - -func (b *backend) tidyStatusIncDeletedAcmeAccountCount() { - b.tidyStatusLock.Lock() - defer b.tidyStatusLock.Unlock() - - b.tidyStatus.acmeAccountsDeletedCount++ -} - -func (b *backend) tidyStatusIncDelAcmeOrderCount() { - b.tidyStatusLock.Lock() - defer b.tidyStatusLock.Unlock() - - b.tidyStatus.acmeOrdersDeletedCount++ -} - const pathTidyHelpSyn = ` Tidy up the backend by removing expired certificates, revocation information, or both. 
@@ -2041,20 +745,6 @@ The result includes the following fields: * 'cert_store_deleted_count': The number of certificate storage entries deleted * 'revoked_cert_deleted_count': The number of revoked certificate entries deleted * 'missing_issuer_cert_count': The number of revoked certificates which were missing a valid issuer reference -* 'tidy_expired_issuers': the value of this parameter when initiating the tidy operation -* 'issuer_safety_buffer': the value of this parameter when initiating the tidy operation -* 'tidy_move_legacy_ca_bundle': the value of this parameter when initiating the tidy operation -* 'tidy_revocation_queue': the value of this parameter when initiating the tidy operation -* 'revocation_queue_deleted_count': the number of revocation queue entries deleted -* 'tidy_cross_cluster_revoked_certs': the value of this parameter when initiating the tidy operation -* 'cross_revoked_cert_deleted_count': the number of cross-cluster revoked certificate entries deleted -* 'revocation_queue_safety_buffer': the value of this parameter when initiating the tidy operation -* 'tidy_acme': the value of this parameter when initiating the tidy operation -* 'acme_account_safety_buffer': the value of this parameter when initiating the tidy operation -* 'total_acme_account_count': the total number of acme accounts in the list to be iterated over -* 'acme_account_deleted_count': the number of revoked acme accounts deleted during the operation -* 'acme_account_revoked_count': the number of acme accounts revoked during the operation -* 'acme_orders_deleted_count': the number of acme orders deleted during the operation ` const pathConfigAutoTidySyn = ` @@ -2070,26 +760,3 @@ controls the frequency of auto-tidy execution). Once enabled, a tidy operation will be kicked off automatically, as if it were executed with the posted configuration. 
` - -func getTidyConfigData(config tidyConfig) map[string]interface{} { - return map[string]interface{}{ - // This map is in the same order as tidyConfig to ensure that all fields are accounted for - "enabled": config.Enabled, - "interval_duration": int(config.Interval / time.Second), - "tidy_cert_store": config.CertStore, - "tidy_revoked_certs": config.RevokedCerts, - "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, - "tidy_expired_issuers": config.ExpiredIssuers, - "tidy_move_legacy_ca_bundle": config.BackupBundle, - "tidy_acme": config.TidyAcme, - "safety_buffer": int(config.SafetyBuffer / time.Second), - "issuer_safety_buffer": int(config.IssuerSafetyBuffer / time.Second), - "acme_account_safety_buffer": int(config.AcmeAccountSafetyBuffer / time.Second), - "pause_duration": config.PauseDuration.String(), - "publish_stored_certificate_count_metrics": config.PublishMetrics, - "maintain_stored_certificate_counts": config.MaintainCount, - "tidy_revocation_queue": config.RevocationQueue, - "revocation_queue_safety_buffer": int(config.QueueSafetyBuffer / time.Second), - "tidy_cross_cluster_revoked_certs": config.CrossRevokedCerts, - } -} diff --git a/builtin/logical/pki/path_tidy_test.go b/builtin/logical/pki/path_tidy_test.go index a5469ce003e58..e06c7068bb1b0 100644 --- a/builtin/logical/pki/path_tidy_test.go +++ b/builtin/logical/pki/path_tidy_test.go @@ -1,21 +1,9 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( - "encoding/json" - "errors" - "fmt" - "strings" "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - - "github.com/armon/go-metrics" - "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/logical" @@ -24,41 +12,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestTidyConfigs(t *testing.T) { - t.Parallel() - - var cfg tidyConfig - operations := strings.Split(cfg.AnyTidyConfig(), " / ") - t.Logf("Got tidy operations: %v", operations) - - for _, operation := range operations { - b, s := CreateBackendWithStorage(t) - - resp, err := CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ - "enabled": true, - operation: true, - }) - requireSuccessNonNilResponse(t, resp, err, "expected to be able to enable auto-tidy operation "+operation) - - resp, err = CBRead(b, s, "config/auto-tidy") - requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for operation "+operation) - require.True(t, resp.Data[operation].(bool), "expected operation to be enabled after reading auto-tidy config "+operation) - - resp, err = CBWrite(b, s, "tidy", map[string]interface{}{ - operation: true, - }) - requireSuccessNonNilResponse(t, resp, err, "expected to be able to start tidy operation with "+operation) - if len(resp.Warnings) > 0 { - t.Logf("got warnings while starting manual tidy: %v", resp.Warnings) - for _, warning := range resp.Warnings { - if strings.Contains(warning, "Manual tidy requested but no tidy operations were set.") { - t.Fatalf("expected to be able to enable tidy operation with just %v but got warning: %v / (resp=%v)", operation, warning, resp) - } - } - } - } -} - func TestAutoTidy(t *testing.T) { t.Parallel() @@ -110,7 +63,6 @@ func TestAutoTidy(t *testing.T) { require.NotNil(t, resp) require.NotEmpty(t, resp.Data) require.NotEmpty(t, 
resp.Data["issuer_id"]) - issuerId := resp.Data["issuer_id"] // Run tidy so status is not empty when we run it later... _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ @@ -149,17 +101,6 @@ func TestAutoTidy(t *testing.T) { leafSerial := resp.Data["serial_number"].(string) leafCert := parseCert(t, resp.Data["certificate"].(string)) - // Read cert before revoking - resp, err = client.Logical().Read("pki/cert/" + leafSerial) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - revocationTime, err := (resp.Data["revocation_time"].(json.Number)).Int64() - require.Equal(t, int64(0), revocationTime, "revocation time was not zero") - require.Empty(t, resp.Data["revocation_time_rfc3339"], "revocation_time_rfc3339 was not empty") - require.Empty(t, resp.Data["issuer_id"], "issuer_id was not empty") - _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ "serial_number": leafSerial, }) @@ -171,22 +112,6 @@ func TestAutoTidy(t *testing.T) { require.NotNil(t, resp) require.NotNil(t, resp.Data) require.NotEmpty(t, resp.Data["certificate"]) - revocationTime, err = (resp.Data["revocation_time"].(json.Number)).Int64() - require.NoError(t, err, "failed converting %s to int", resp.Data["revocation_time"]) - revTime := time.Unix(revocationTime, 0) - now := time.Now() - if !(now.After(revTime) && now.Add(-10*time.Minute).Before(revTime)) { - t.Fatalf("parsed revocation time not within the last 10 minutes current time: %s, revocation time: %s", now, revTime) - } - utcLoc, err := time.LoadLocation("UTC") - require.NoError(t, err, "failed to parse UTC location?") - - rfc3339RevocationTime, err := time.Parse(time.RFC3339Nano, resp.Data["revocation_time_rfc3339"].(string)) - require.NoError(t, err, "failed parsing revocation_time_rfc3339 field: %s", resp.Data["revocation_time_rfc3339"]) - - require.Equal(t, revTime.In(utcLoc), rfc3339RevocationTime.Truncate(time.Second), - 
"revocation times did not match revocation_time: %s, "+"rfc3339 time: %s", revTime, rfc3339RevocationTime) - require.Equal(t, issuerId, resp.Data["issuer_id"], "issuer_id on leaf cert did not match") // Wait for cert to expire and the safety buffer to elapse. time.Sleep(time.Until(leafCert.NotAfter) + 3*time.Second) @@ -238,7 +163,7 @@ func TestTidyCancellation(t *testing.T) { numLeaves := 100 - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Create a root, a role, and a bunch of leaves. _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -264,19 +189,17 @@ func TestTidyCancellation(t *testing.T) { // Kick off a tidy operation (which runs in the background), but with // a slow-ish pause between certificates. - resp, err := CBWrite(b, s, "tidy", map[string]interface{}{ + _, err = CBWrite(b, s, "tidy", map[string]interface{}{ "tidy_cert_store": true, "safety_buffer": "1s", "pause_duration": "1s", }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("tidy"), logical.UpdateOperation), resp, true) // If we wait six seconds, the operation should still be running. That's // how we check that pause_duration works. time.Sleep(3 * time.Second) - resp, err = CBRead(b, s, "tidy-status") - + resp, err := CBRead(b, s, "tidy-status") require.NoError(t, err) require.NotNil(t, resp) require.NotNil(t, resp.Data) @@ -284,7 +207,6 @@ func TestTidyCancellation(t *testing.T) { // If we now cancel the operation, the response should say Cancelling. 
cancelResp, err := CBWrite(b, s, "tidy-cancel", map[string]interface{}{}) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("tidy-cancel"), logical.UpdateOperation), resp, true) require.NoError(t, err) require.NotNil(t, cancelResp) require.NotNil(t, cancelResp.Data) @@ -304,7 +226,6 @@ func TestTidyCancellation(t *testing.T) { time.Sleep(3 * time.Second) statusResp, err := CBRead(b, s, "tidy-status") - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("tidy-status"), logical.ReadOperation), resp, true) require.NoError(t, err) require.NotNil(t, statusResp) require.NotNil(t, statusResp.Data) @@ -314,483 +235,3 @@ func TestTidyCancellation(t *testing.T) { t.Fatalf("expected to only process at most 3 more certificates, but processed (%v >>> %v) certs", nowMany, howMany) } } - -func TestTidyIssuers(t *testing.T) { - t.Parallel() - - b, s := CreateBackendWithStorage(t) - - // Create a root that expires quickly and one valid for longer. - _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root1 example.com", - "issuer_name": "root-expired", - "ttl": "1s", - "key_type": "ec", - }) - require.NoError(t, err) - - _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root2 example.com", - "issuer_name": "root-valid", - "ttl": "60m", - "key_type": "rsa", - }) - require.NoError(t, err) - - // Sleep long enough to expire the root. - time.Sleep(2 * time.Second) - - // First tidy run shouldn't remove anything; too long of safety buffer. - _, err = CBWrite(b, s, "tidy", map[string]interface{}{ - "tidy_expired_issuers": true, - "issuer_safety_buffer": "60m", - }) - require.NoError(t, err) - - // Wait for tidy to finish. - time.Sleep(2 * time.Second) - - // Expired issuer should exist. 
- resp, err := CBRead(b, s, "issuer/root-expired") - requireSuccessNonNilResponse(t, resp, err, "expired should still be present") - resp, err = CBRead(b, s, "issuer/root-valid") - requireSuccessNonNilResponse(t, resp, err, "valid should still be present") - - // Second tidy run with shorter safety buffer shouldn't remove the - // expired one, as it should be the default issuer. - _, err = CBWrite(b, s, "tidy", map[string]interface{}{ - "tidy_expired_issuers": true, - "issuer_safety_buffer": "1s", - }) - require.NoError(t, err) - - // Wait for tidy to finish. - time.Sleep(2 * time.Second) - - // Expired issuer should still exist. - resp, err = CBRead(b, s, "issuer/root-expired") - requireSuccessNonNilResponse(t, resp, err, "expired should still be present") - resp, err = CBRead(b, s, "issuer/root-valid") - requireSuccessNonNilResponse(t, resp, err, "valid should still be present") - - // Update the default issuer. - _, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ - "default": "root-valid", - }) - require.NoError(t, err) - - // Third tidy run should remove the expired one. - _, err = CBWrite(b, s, "tidy", map[string]interface{}{ - "tidy_expired_issuers": true, - "issuer_safety_buffer": "1s", - }) - require.NoError(t, err) - - // Wait for tidy to finish. - time.Sleep(2 * time.Second) - - // Valid issuer should exist still; other should be removed. - resp, err = CBRead(b, s, "issuer/root-expired") - require.Error(t, err) - require.Nil(t, resp) - resp, err = CBRead(b, s, "issuer/root-valid") - requireSuccessNonNilResponse(t, resp, err, "valid should still be present") - - // Finally, one more tidy should cause no changes. - _, err = CBWrite(b, s, "tidy", map[string]interface{}{ - "tidy_expired_issuers": true, - "issuer_safety_buffer": "1s", - }) - require.NoError(t, err) - - // Wait for tidy to finish. - time.Sleep(2 * time.Second) - - // Valid issuer should exist still; other should be removed. 
- resp, err = CBRead(b, s, "issuer/root-expired") - require.Error(t, err) - require.Nil(t, resp) - resp, err = CBRead(b, s, "issuer/root-valid") - requireSuccessNonNilResponse(t, resp, err, "valid should still be present") - - // Ensure we have safety buffer and expired issuers set correctly. - statusResp, err := CBRead(b, s, "tidy-status") - require.NoError(t, err) - require.NotNil(t, statusResp) - require.NotNil(t, statusResp.Data) - require.Equal(t, statusResp.Data["issuer_safety_buffer"], 1) - require.Equal(t, statusResp.Data["tidy_expired_issuers"], true) -} - -func TestTidyIssuerConfig(t *testing.T) { - t.Parallel() - - b, s := CreateBackendWithStorage(t) - - // Ensure the default auto-tidy config matches expectations - resp, err := CBRead(b, s, "config/auto-tidy") - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/auto-tidy"), logical.ReadOperation), resp, true) - requireSuccessNonNilResponse(t, resp, err) - - jsonBlob, err := json.Marshal(&defaultTidyConfig) - require.NoError(t, err) - var defaultConfigMap map[string]interface{} - err = json.Unmarshal(jsonBlob, &defaultConfigMap) - require.NoError(t, err) - - // Coerce defaults to API response types. 
- defaultConfigMap["interval_duration"] = int(time.Duration(defaultConfigMap["interval_duration"].(float64)) / time.Second) - defaultConfigMap["issuer_safety_buffer"] = int(time.Duration(defaultConfigMap["issuer_safety_buffer"].(float64)) / time.Second) - defaultConfigMap["safety_buffer"] = int(time.Duration(defaultConfigMap["safety_buffer"].(float64)) / time.Second) - defaultConfigMap["pause_duration"] = time.Duration(defaultConfigMap["pause_duration"].(float64)).String() - defaultConfigMap["revocation_queue_safety_buffer"] = int(time.Duration(defaultConfigMap["revocation_queue_safety_buffer"].(float64)) / time.Second) - defaultConfigMap["acme_account_safety_buffer"] = int(time.Duration(defaultConfigMap["acme_account_safety_buffer"].(float64)) / time.Second) - - require.Equal(t, defaultConfigMap, resp.Data) - - // Ensure setting issuer-tidy related fields stick. - resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ - "tidy_expired_issuers": true, - "issuer_safety_buffer": "5s", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/auto-tidy"), logical.UpdateOperation), resp, true) - - requireSuccessNonNilResponse(t, resp, err) - require.Equal(t, true, resp.Data["tidy_expired_issuers"]) - require.Equal(t, 5, resp.Data["issuer_safety_buffer"]) -} - -// TestCertStorageMetrics ensures that when enabled, metrics are able to count the number of certificates in storage and -// number of revoked certificates in storage. Moreover, this test ensures that the gauge is emitted periodically, so -// that the metric does not disappear or go stale. 
-func TestCertStorageMetrics(t *testing.T) { - // This tests uses the same setup as TestAutoTidy - newPeriod := 1 * time.Second - - // We set up a metrics accumulator - inmemSink := metrics.NewInmemSink( - 2*newPeriod, // A short time period is ideal here to test metrics are emitted every periodic func - 10*newPeriod) // Do not keep a huge amount of metrics in the sink forever, clear them out to save memory usage. - - metricsConf := metrics.DefaultConfig("") - metricsConf.EnableHostname = false - metricsConf.EnableHostnameLabel = false - metricsConf.EnableServiceLabel = false - metricsConf.EnableTypePrefix = false - - _, err := metrics.NewGlobal(metricsConf, inmemSink) - if err != nil { - t.Fatal(err) - } - - // This test requires the periodicFunc to trigger, which requires we stand - // up a full test cluster. - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - // See notes below about usage of /sys/raw for reading cluster - // storage without barrier encryption. - EnableRaw: true, - RollbackPeriod: newPeriod, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // Mount PKI - err = client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "10m", - MaxLeaseTTL: "60m", - }, - }) - require.NoError(t, err) - - // Generate root. - resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "Root X1", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["issuer_id"]) - - // Set up a testing role. 
- _, err = client.Logical().Write("pki/roles/local-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - }) - require.NoError(t, err) - - // Run tidy so that tidy-status is not empty - _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ - "tidy_revoked_certs": true, - }) - require.NoError(t, err) - - // Since certificate counts are off by default, we shouldn't see counts in the tidy status - tidyStatus, err := client.Logical().Read("pki/tidy-status") - if err != nil { - t.Fatal(err) - } - // backendUUID should exist, we need this for metrics - backendUUID := tidyStatus.Data["internal_backend_uuid"].(string) - // "current_cert_store_count", "current_revoked_cert_count" - countData, ok := tidyStatus.Data["current_cert_store_count"] - if ok && countData != nil { - t.Fatalf("Certificate counting should be off by default, but current cert store count %v appeared in tidy status in unconfigured mount", countData) - } - revokedCountData, ok := tidyStatus.Data["current_revoked_cert_count"] - if ok && revokedCountData != nil { - t.Fatalf("Certificate counting should be off by default, but revoked cert count %v appeared in tidy status in unconfigured mount", revokedCountData) - } - - // Since certificate counts are off by default, those metrics should not exist yet - mostRecentInterval := inmemSink.Data()[len(inmemSink.Data())-1] - _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] - if ok { - t.Fatalf("Certificate counting should be off by default, but revoked cert count was emitted as a metric in an unconfigured mount") - } - _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"] - if ok { - t.Fatalf("Certificate counting should be off by default, but total certificate count was emitted as a metric in an unconfigured mount") - } - - // Write the auto-tidy config. 
- _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ - "enabled": true, - "interval_duration": "1s", - "tidy_cert_store": true, - "tidy_revoked_certs": true, - "safety_buffer": "1s", - "maintain_stored_certificate_counts": true, - "publish_stored_certificate_count_metrics": false, - }) - require.NoError(t, err) - - // Reload the Mount - Otherwise Stored Certificate Counts Will Not Be Populated - _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ - "plugin": "pki", - }) - - // By reading the auto-tidy endpoint, we ensure that initialize has completed (which has a write lock on auto-tidy) - _, err = client.Logical().Read("/pki/config/auto-tidy") - if err != nil { - t.Fatal(err) - } - - // Since publish_stored_certificate_count_metrics is still false, these metrics should still not exist yet - mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] - _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] - if ok { - t.Fatalf("Certificate counting should be off by default, but revoked cert count was emitted as a metric in an unconfigured mount") - } - _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"] - if ok { - t.Fatalf("Certificate counting should be off by default, but total certificate count was emitted as a metric in an unconfigured mount") - } - - // But since certificate counting is on, the metrics should exist on tidyStatus endpoint: - tidyStatus, err = client.Logical().Read("pki/tidy-status") - if err != nil { - t.Fatal(err) - } - // backendUUID should exist, we need this for metrics - backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) - // "current_cert_store_count", "current_revoked_cert_count" - certStoreCount, ok := tidyStatus.Data["current_cert_store_count"] - if !ok { - t.Fatalf("Certificate counting has been turned on, but current cert store count does not appear in tidy status") - } - 
if certStoreCount != json.Number("1") { - t.Fatalf("Only created one certificate, but a got a certificate count of %v", certStoreCount) - } - revokedCertCount, ok := tidyStatus.Data["current_revoked_cert_count"] - if !ok { - t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status") - } - if revokedCertCount != json.Number("0") { - t.Fatalf("Have not yet revoked a certificate, but got a revoked cert store count of %v", revokedCertCount) - } - - // Write the auto-tidy config, again, this time turning on metrics - _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ - "enabled": true, - "interval_duration": "1s", - "tidy_cert_store": true, - "tidy_revoked_certs": true, - "safety_buffer": "1s", - "maintain_stored_certificate_counts": true, - "publish_stored_certificate_count_metrics": true, - }) - require.NoError(t, err) - - // Issue a cert and revoke it. - resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ - "common_name": "example.com", - "ttl": "10s", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["serial_number"]) - require.NotEmpty(t, resp.Data["certificate"]) - leafSerial := resp.Data["serial_number"].(string) - leafCert := parseCert(t, resp.Data["certificate"].(string)) - - // Read cert before revoking - resp, err = client.Logical().Read("pki/cert/" + leafSerial) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - revocationTime, err := (resp.Data["revocation_time"].(json.Number)).Int64() - require.Equal(t, int64(0), revocationTime, "revocation time was not zero") - require.Empty(t, resp.Data["revocation_time_rfc3339"], "revocation_time_rfc3339 was not empty") - require.Empty(t, resp.Data["issuer_id"], "issuer_id was not empty") - - _, err = client.Logical().Write("pki/revoke", 
map[string]interface{}{ - "serial_number": leafSerial, - }) - require.NoError(t, err) - - // We read the auto-tidy endpoint again, to ensure any metrics logic has completed (lock on config) - _, err = client.Logical().Read("/pki/config/auto-tidy") - if err != nil { - t.Fatal(err) - } - - // Check Metrics After Cert Has Be Created and Revoked - tidyStatus, err = client.Logical().Read("pki/tidy-status") - if err != nil { - t.Fatal(err) - } - backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) - certStoreCount, ok = tidyStatus.Data["current_cert_store_count"] - if !ok { - t.Fatalf("Certificate counting has been turned on, but current cert store count does not appear in tidy status") - } - if certStoreCount != json.Number("2") { - t.Fatalf("Created root and leaf certificate, but a got a certificate count of %v", certStoreCount) - } - revokedCertCount, ok = tidyStatus.Data["current_revoked_cert_count"] - if !ok { - t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status") - } - if revokedCertCount != json.Number("1") { - t.Fatalf("Revoked one certificate, but got a revoked cert store count of %v", revokedCertCount) - } - // This should now be initialized - certCountError, ok := tidyStatus.Data["certificate_counting_error"] - if ok && certCountError.(string) != "" { - t.Fatalf("Expected certificate count error to disappear after initialization, but got error %v", certCountError) - } - - testhelpers.RetryUntil(t, newPeriod*5, func() error { - mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] - revokedCertCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] - if !ok { - return errors.New("turned on metrics, but revoked cert count was not emitted") - } - if revokedCertCountGaugeValue.Value != 1 { - return fmt.Errorf("revoked one certificate, but metrics emitted a revoked cert store count of %v", revokedCertCountGaugeValue) - } - 
certStoreCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"] - if !ok { - return errors.New("turned on metrics, but total certificate count was not emitted") - } - if certStoreCountGaugeValue.Value != 2 { - return fmt.Errorf("stored two certificiates, but total certificate count emitted was %v", certStoreCountGaugeValue.Value) - } - return nil - }) - - // Wait for cert to expire and the safety buffer to elapse. - time.Sleep(time.Until(leafCert.NotAfter) + 3*time.Second) - - // Wait for auto-tidy to run afterwards. - var foundTidyRunning string - var foundTidyFinished bool - timeoutChan := time.After(120 * time.Second) - for { - if foundTidyRunning != "" && foundTidyFinished { - break - } - - select { - case <-timeoutChan: - t.Fatalf("expected auto-tidy to run (%v) and finish (%v) before 120 seconds elapsed", foundTidyRunning, foundTidyFinished) - default: - time.Sleep(250 * time.Millisecond) - - resp, err = client.Logical().Read("pki/tidy-status") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["state"]) - require.NotEmpty(t, resp.Data["time_started"]) - state := resp.Data["state"].(string) - started := resp.Data["time_started"].(string) - t.Logf("Resp: %v", resp.Data) - - // We want the _next_ tidy run after the cert expires. This - // means if we're currently finished when we hit this the - // first time, we want to wait for the next run. 
- if foundTidyRunning == "" { - foundTidyRunning = started - } else if foundTidyRunning != started && !foundTidyFinished && state == "Finished" { - foundTidyFinished = true - } - } - } - - // After Tidy, Cert Store Count Should Still Be Available, and Be Updated: - // Check Metrics After Cert Has Be Created and Revoked - tidyStatus, err = client.Logical().Read("pki/tidy-status") - if err != nil { - t.Fatal(err) - } - backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) - // "current_cert_store_count", "current_revoked_cert_count" - certStoreCount, ok = tidyStatus.Data["current_cert_store_count"] - if !ok { - t.Fatalf("Certificate counting has been turned on, but current cert store count does not appear in tidy status") - } - if certStoreCount != json.Number("1") { - t.Fatalf("Created root and leaf certificate, deleted leaf, but a got a certificate count of %v", certStoreCount) - } - revokedCertCount, ok = tidyStatus.Data["current_revoked_cert_count"] - if !ok { - t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status") - } - if revokedCertCount != json.Number("0") { - t.Fatalf("Revoked certificate has been tidied, but got a revoked cert store count of %v", revokedCertCount) - } - - testhelpers.RetryUntil(t, newPeriod*5, func() error { - mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] - revokedCertCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] - if !ok { - return errors.New("turned on metrics, but revoked cert count was not emitted") - } - if revokedCertCountGaugeValue.Value != 0 { - return fmt.Errorf("revoked certificate has been tidied, but metrics emitted a revoked cert store count of %v", revokedCertCountGaugeValue) - } - certStoreCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"] - if !ok { - return errors.New("turned on metrics, but total certificate count was 
not emitted") - } - if certStoreCountGaugeValue.Value != 1 { - return fmt.Errorf("only one of two certificates left after tidy, but total certificate count emitted was %v", certStoreCountGaugeValue.Value) - } - return nil - }) -} diff --git a/builtin/logical/pki/periodic.go b/builtin/logical/pki/periodic.go deleted file mode 100644 index 77ff31212ef96..0000000000000 --- a/builtin/logical/pki/periodic.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "crypto/x509" - "errors" - "fmt" - "sync/atomic" - "time" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - minUnifiedTransferDelay = 30 * time.Minute -) - -type unifiedTransferStatus struct { - isRunning atomic.Bool - lastRun time.Time - forceRerun atomic.Bool -} - -func (uts *unifiedTransferStatus) forceRun() { - uts.forceRerun.Store(true) -} - -func newUnifiedTransferStatus() *unifiedTransferStatus { - return &unifiedTransferStatus{} -} - -// runUnifiedTransfer meant to run as a background, this will process all and -// send all missing local revocation entries to the unified space if the feature -// is enabled. 
-func runUnifiedTransfer(sc *storageContext) { - b := sc.Backend - status := b.unifiedTransferStatus - - isPerfStandby := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) - - if isPerfStandby || b.System().LocalMount() { - // We only do this on active enterprise nodes, when we aren't a local mount - return - } - - config, err := b.crlBuilder.getConfigWithUpdate(sc) - if err != nil { - b.Logger().Error("failed to retrieve crl config from storage for unified transfer background process", - "error", err) - return - } - - if !config.UnifiedCRL { - // Feature is disabled, no need to run - return - } - - clusterId, err := b.System().ClusterID(sc.Context) - if err != nil { - b.Logger().Error("failed to fetch cluster id for unified transfer background process", - "error", err) - return - } - - if !status.isRunning.CompareAndSwap(false, true) { - b.Logger().Debug("an existing unified transfer process is already running") - return - } - defer status.isRunning.Store(false) - - // Because access to lastRun is not locked, we need to delay this check - // until after we grab the isRunning CAS lock. - if !status.lastRun.IsZero() { - // We have run before, we only run again if we have - // been requested to forceRerun, and we haven't run since our - // minimum delay. - if !(status.forceRerun.Load() && time.Since(status.lastRun) < minUnifiedTransferDelay) { - return - } - } - - // Reset our flag before we begin, we do this before we start as - // we can't guarantee that we can properly parse/fix the error from an - // error that comes in from the revoke API after that. This will - // force another run, which worst case, we will fix it on the next - // periodic function call that passes our min delay. 
- status.forceRerun.Store(false) - - err = doUnifiedTransferMissingLocalSerials(sc, clusterId) - if err != nil { - b.Logger().Error("an error occurred running unified transfer", "error", err.Error()) - status.forceRerun.Store(true) - } else { - if config.EnableDelta { - err = doUnifiedTransferMissingDeltaWALSerials(sc, clusterId) - if err != nil { - b.Logger().Error("an error occurred running unified transfer", "error", err.Error()) - status.forceRerun.Store(true) - } - } - } - - status.lastRun = time.Now() -} - -func doUnifiedTransferMissingLocalSerials(sc *storageContext, clusterId string) error { - localRevokedSerialNums, err := sc.listRevokedCerts() - if err != nil { - return err - } - if len(localRevokedSerialNums) == 0 { - // No local certs to transfer, no further work to do. - return nil - } - - unifiedSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId) - if err != nil { - return err - } - unifiedCertLookup := sliceToMapKey(unifiedSerials) - - errCount := 0 - for i, serialNum := range localRevokedSerialNums { - if i%25 == 0 { - config, _ := sc.Backend.crlBuilder.getConfigWithUpdate(sc) - if config != nil && !config.UnifiedCRL { - return errors.New("unified crl has been disabled after we started, stopping") - } - } - if _, ok := unifiedCertLookup[serialNum]; !ok { - err := readRevocationEntryAndTransfer(sc, serialNum) - if err != nil { - errCount++ - sc.Backend.Logger().Error("Failed transferring local revocation to unified space", - "serial", serialNum, "error", err) - } - } - } - - if errCount > 0 { - sc.Backend.Logger().Warn(fmt.Sprintf("Failed transfering %d local serials to unified storage", errCount)) - } - - return nil -} - -func doUnifiedTransferMissingDeltaWALSerials(sc *storageContext, clusterId string) error { - // We need to do a similar thing for Delta WAL entry certificates. - // When the delta WAL failed to write for one or more entries, - // we'll need to replicate these up to the primary cluster. 
When it - // has performed a new delta WAL build, it will empty storage and - // update to a last written WAL entry that exceeds what we've seen - // locally. - thisUnifiedWALEntryPath := unifiedDeltaWALPath + deltaWALLastRevokedSerialName - lastUnifiedWALEntry, err := getLastWALSerial(sc, thisUnifiedWALEntryPath) - if err != nil { - return fmt.Errorf("failed to fetch last cross-cluster unified revoked delta WAL serial number: %w", err) - } - - lastLocalWALEntry, err := getLastWALSerial(sc, localDeltaWALLastRevokedSerial) - if err != nil { - return fmt.Errorf("failed to fetch last locally revoked delta WAL serial number: %w", err) - } - - // We now need to transfer all the entries and then write the last WAL - // entry at the end. Start by listing all certificates; any missing - // certificates will be copied over and then the WAL entry will be - // updated once. - // - // We do not delete entries either locally or remotely, as either - // cluster could've rebuilt delta CRLs with out-of-sync information, - // removing some entries (and, we cannot differentiate between these - // two cases). On next full CRL rebuild (on either cluster), the state - // should get synchronized, and future delta CRLs after this function - // returns without issue will see the remaining entries. - // - // Lastly, we need to ensure we don't accidentally write any unified - // delta WAL entries that aren't present in the main cross-cluster - // revoked storage location. This would mean the above function failed - // to copy them for some reason, despite them presumably appearing - // locally. 
- _unifiedWALEntries, err := sc.Storage.List(sc.Context, unifiedDeltaWALPath) - if err != nil { - return fmt.Errorf("failed to list cross-cluster unified delta WAL storage: %w", err) - } - unifiedWALEntries := sliceToMapKey(_unifiedWALEntries) - - _unifiedRevokedSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId) - if err != nil { - return fmt.Errorf("failed to list cross-cluster revoked certificates: %w", err) - } - unifiedRevokedSerials := sliceToMapKey(_unifiedRevokedSerials) - - localWALEntries, err := sc.Storage.List(sc.Context, localDeltaWALPath) - if err != nil { - return fmt.Errorf("failed to list local delta WAL storage: %w", err) - } - - if lastUnifiedWALEntry == lastLocalWALEntry && len(_unifiedWALEntries) == len(localWALEntries) { - // Writing the last revoked WAL entry is the last thing that we do. - // Because these entries match (across clusters) and we have the same - // number of entries, assume we don't have anything to sync and exit - // early. - // - // We need both checks as, in the event of PBPWF failing and then - // returning while more revocations are happening, we could have - // been schedule to run, but then skip running (if only the first - // condition was checked) because a later revocation succeeded - // in writing a unified WAL entry, before we started replicating - // the rest back up. - // - // The downside of this approach is that, if the main cluster - // does a full rebuild in the mean time, we could re-sync more - // entries back up to the primary cluster that are already - // included in the complete CRL. Users can manually rebuild the - // full CRL (clearing these duplicate delta CRL entries) if this - // affects them. 
- return nil - } - - errCount := 0 - for index, serial := range localWALEntries { - if index%25 == 0 { - config, _ := sc.Backend.crlBuilder.getConfigWithUpdate(sc) - if config != nil && (!config.UnifiedCRL || !config.EnableDelta) { - return errors.New("unified or delta CRLs have been disabled after we started, stopping") - } - } - - if serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName { - // Skip our special serial numbers. - continue - } - - _, isAlreadyPresent := unifiedWALEntries[serial] - if isAlreadyPresent { - // Serial exists on both local and unified cluster. We're - // presuming we don't need to read and re-write these entries - // and that only missing entries need to be updated. - continue - } - - _, isRevokedCopied := unifiedRevokedSerials[serial] - if !isRevokedCopied { - // We need to wait here to copy over. - errCount += 1 - sc.Backend.Logger().Debug("Delta WAL exists locally, but corresponding cross-cluster full revocation entry is missing; skipping", "serial", serial) - continue - } - - // All good: read the local entry and write to the remote variant. - localPath := localDeltaWALPath + serial - unifiedPath := unifiedDeltaWALPath + serial - - entry, err := sc.Storage.Get(sc.Context, localPath) - if err != nil || entry == nil { - errCount += 1 - sc.Backend.Logger().Error("Failed reading local delta WAL entry to copy to cross-cluster", "serial", serial, "err", err) - continue - } - - entry.Key = unifiedPath - err = sc.Storage.Put(sc.Context, entry) - if err != nil { - errCount += 1 - sc.Backend.Logger().Error("Failed sync local delta WAL entry to cross-cluster unified delta WAL location", "serial", serial, "err", err) - continue - } - } - - if errCount > 0 { - // See note above about why we don't fail here. - sc.Backend.Logger().Warn(fmt.Sprintf("Failed transfering %d local delta WAL serials to unified storage", errCount)) - return nil - } - - // Everything worked. 
Here, we can write over the delta WAL last revoked - // value. By using the earlier value, even if new revocations have - // occurred, we ensure any further missing entries can be handled in the - // next round. - lastRevSerial := lastWALInfo{Serial: lastLocalWALEntry} - lastWALEntry, err := logical.StorageEntryJSON(thisUnifiedWALEntryPath, lastRevSerial) - if err != nil { - return fmt.Errorf("unable to create cross-cluster unified last delta CRL WAL entry: %w", err) - } - if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil { - return fmt.Errorf("error saving cross-cluster unified last delta CRL WAL entry: %w", err) - } - - return nil -} - -func readRevocationEntryAndTransfer(sc *storageContext, serial string) error { - hyphenSerial := normalizeSerial(serial) - revInfo, err := sc.fetchRevocationInfo(hyphenSerial) - if err != nil { - return fmt.Errorf("failed loading revocation entry for serial: %s: %w", serial, err) - } - if revInfo == nil { - sc.Backend.Logger().Debug("no certificate revocation entry for serial", "serial", serial) - return nil - } - cert, err := x509.ParseCertificate(revInfo.CertificateBytes) - if err != nil { - sc.Backend.Logger().Debug("failed parsing certificate stored in revocation entry for serial", - "serial", serial, "error", err) - return nil - } - if revInfo.CertificateIssuer == "" { - // No certificate issuer assigned to this serial yet, just drop it for now, - // as a crl rebuild/tidy needs to happen - return nil - } - - revocationTime := revInfo.RevocationTimeUTC - if revInfo.RevocationTimeUTC.IsZero() { - // Legacy revocation entries only had this field and not revocationTimeUTC set... - revocationTime = time.Unix(revInfo.RevocationTime, 0) - } - - if time.Now().After(cert.NotAfter) { - // ignore transferring this entry as it has already expired. 
- return nil - } - - entry := &unifiedRevocationEntry{ - SerialNumber: hyphenSerial, - CertExpiration: cert.NotAfter, - RevocationTimeUTC: revocationTime, - CertificateIssuer: revInfo.CertificateIssuer, - } - - return writeUnifiedRevocationEntry(sc, entry) -} diff --git a/builtin/logical/pki/secret_certs.go b/builtin/logical/pki/secret_certs.go index 11ebcd2ac7cb3..79fa410843cd0 100644 --- a/builtin/logical/pki/secret_certs.go +++ b/builtin/logical/pki/secret_certs.go @@ -1,11 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( "context" - "crypto/x509" "fmt" "github.com/hashicorp/vault/sdk/framework" @@ -52,35 +48,5 @@ func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, _ b.revokeStorageLock.Lock() defer b.revokeStorageLock.Unlock() - sc := b.makeStorageContext(ctx, req.Storage) - serial := serialInt.(string) - - certEntry, err := fetchCertBySerial(sc, "certs/", serial) - if err != nil { - return nil, err - } - if certEntry == nil { - // We can't write to revoked/ or update the CRL anyway because we don't have the cert, - // and there's no reason to expect this will work on a subsequent - // retry. Just give up and let the lease get deleted. - b.Logger().Warn("expired certificate revoke failed because not found in storage, treating as success", "serial", serial) - return nil, nil - } - - cert, err := x509.ParseCertificate(certEntry.Value) - if err != nil { - return nil, fmt.Errorf("error parsing certificate: %w", err) - } - - // Compatibility: Don't revoke CAs if they had leases. New CAs going forward aren't issued leases. 
- if cert.IsCA { - return nil, nil - } - - config, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) - if err != nil { - return nil, fmt.Errorf("error revoking serial: %s: failed reading config: %w", serial, err) - } - - return revokeCert(sc, config, cert) + return revokeCert(ctx, b, req, serialInt.(string), true) } diff --git a/builtin/logical/pki/storage.go b/builtin/logical/pki/storage.go index 52b0faf2ca3b6..6a2cf2fc23a70 100644 --- a/builtin/logical/pki/storage.go +++ b/builtin/logical/pki/storage.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -8,41 +5,31 @@ import ( "context" "crypto" "crypto/x509" - "errors" "fmt" "sort" "strings" "time" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) -var ErrStorageItemNotFound = errors.New("storage item not found") - const ( - storageKeyConfig = "config/keys" - storageIssuerConfig = "config/issuers" - keyPrefix = "config/key/" - issuerPrefix = "config/issuer/" - storageLocalCRLConfig = "crls/config" - storageUnifiedCRLConfig = "unified-crls/config" + storageKeyConfig = "config/keys" + storageIssuerConfig = "config/issuers" + keyPrefix = "config/key/" + issuerPrefix = "config/issuer/" + storageLocalCRLConfig = "crls/config" legacyMigrationBundleLogKey = "config/legacyMigrationBundleLog" legacyCertBundlePath = "config/ca_bundle" - legacyCertBundleBackupPath = "config/ca_bundle.bak" legacyCRLPath = "crl" deltaCRLPath = "delta-crl" deltaCRLPathSuffix = "-delta" - unifiedCRLPath = "unified-crl" - unifiedDeltaCRLPath = "unified-delta-crl" - unifiedCRLPathPrefix = "unified-" autoTidyConfigPath = "config/auto-tidy" - clusterConfigPath = "config/cluster" // Used as a quick sanity check for a reference id lookups... 
uuidLength = 36 @@ -181,19 +168,18 @@ type issuerEntry struct { Revoked bool `json:"revoked"` RevocationTime int64 `json:"revocation_time"` RevocationTimeUTC time.Time `json:"revocation_time_utc"` - AIAURIs *aiaConfigEntry `json:"aia_uris,omitempty"` + AIAURIs *certutil.URLEntries `json:"aia_uris,omitempty"` LastModified time.Time `json:"last_modified"` Version uint `json:"version"` } -type internalCRLConfigEntry struct { +type localCRLConfigEntry struct { IssuerIDCRLMap map[issuerID]crlID `json:"issuer_id_crl_map"` CRLNumberMap map[crlID]int64 `json:"crl_number_map"` LastCompleteNumberMap map[crlID]int64 `json:"last_complete_number_map"` CRLExpirationMap map[crlID]time.Time `json:"crl_expiration_map"` LastModified time.Time `json:"last_modified"` DeltaLastModified time.Time `json:"delta_last_modified"` - UseGlobalQueue bool `json:"cross_cluster_revocation"` } type keyConfigEntry struct { @@ -209,70 +195,6 @@ type issuerConfigEntry struct { DefaultFollowsLatestIssuer bool `json:"default_follows_latest_issuer"` } -type clusterConfigEntry struct { - Path string `json:"path"` - AIAPath string `json:"aia_path"` -} - -type aiaConfigEntry struct { - IssuingCertificates []string `json:"issuing_certificates"` - CRLDistributionPoints []string `json:"crl_distribution_points"` - OCSPServers []string `json:"ocsp_servers"` - EnableTemplating bool `json:"enable_templating"` -} - -func (c *aiaConfigEntry) toURLEntries(sc *storageContext, issuer issuerID) (*certutil.URLEntries, error) { - if len(c.IssuingCertificates) == 0 && len(c.CRLDistributionPoints) == 0 && len(c.OCSPServers) == 0 { - return &certutil.URLEntries{}, nil - } - - result := certutil.URLEntries{ - IssuingCertificates: c.IssuingCertificates[:], - CRLDistributionPoints: c.CRLDistributionPoints[:], - OCSPServers: c.OCSPServers[:], - } - - if c.EnableTemplating { - cfg, err := sc.getClusterConfig() - if err != nil { - return nil, fmt.Errorf("error fetching cluster-local address config: %w", err) - } - - for name, 
source := range map[string]*[]string{ - "issuing_certificates": &result.IssuingCertificates, - "crl_distribution_points": &result.CRLDistributionPoints, - "ocsp_servers": &result.OCSPServers, - } { - templated := make([]string, len(*source)) - for index, uri := range *source { - if strings.Contains(uri, "{{cluster_path}}") && len(cfg.Path) == 0 { - return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (path)") - } - if strings.Contains(uri, "{{cluster_aia_path}}") && len(cfg.AIAPath) == 0 { - return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (aia_path)") - } - if strings.Contains(uri, "{{issuer_id}}") && len(issuer) == 0 { - // Elide issuer AIA info as we lack an issuer_id. - return nil, fmt.Errorf("unable to template AIA URLs as we lack an issuer_id for this operation") - } - - uri = strings.ReplaceAll(uri, "{{cluster_path}}", cfg.Path) - uri = strings.ReplaceAll(uri, "{{cluster_aia_path}}", cfg.AIAPath) - uri = strings.ReplaceAll(uri, "{{issuer_id}}", issuer.String()) - templated[index] = uri - } - - if uri := validateURLs(templated); uri != "" { - return nil, fmt.Errorf("error validating templated %v; invalid URI: %v", name, uri) - } - - *source = templated - } - } - - return &result, nil -} - type storageContext struct { Context context.Context Storage logical.Storage @@ -287,15 +209,6 @@ func (b *backend) makeStorageContext(ctx context.Context, s logical.Storage) *st } } -func (sc *storageContext) WithFreshTimeout(timeout time.Duration) (*storageContext, context.CancelFunc) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - return &storageContext{ - Context: ctx, - Storage: sc.Storage, - Backend: sc.Backend, - }, cancel -} - func (sc *storageContext) listKeys() ([]keyID, error) { strList, err := sc.Storage.List(sc.Context, keyPrefix) if err != nil { @@ -567,7 +480,7 @@ func (i issuerEntry) CanMaybeSignWithAlgo(algo x509.SignatureAlgorithm) error { 
cert, err := i.GetCertificate() if err != nil { - return fmt.Errorf("unable to parse issuer's potential signature algorithm types: %w", err) + return fmt.Errorf("unable to parse issuer's potential signature algorithm types: %v", err) } switch cert.PublicKeyAlgorithm { @@ -593,26 +506,17 @@ func (i issuerEntry) CanMaybeSignWithAlgo(algo x509.SignatureAlgorithm) error { return fmt.Errorf("unable to use issuer of type %v to sign with %v key type", cert.PublicKeyAlgorithm.String(), algo.String()) } -func (i issuerEntry) GetAIAURLs(sc *storageContext) (*certutil.URLEntries, error) { +func (i issuerEntry) GetAIAURLs(sc *storageContext) (urls *certutil.URLEntries, err error) { // Default to the per-issuer AIA URLs. - entries := i.AIAURIs + urls = i.AIAURIs // If none are set (either due to a nil entry or because no URLs have // been provided), fall back to the global AIA URL config. - if entries == nil || (len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 && len(entries.OCSPServers) == 0) { - var err error - - entries, err = getGlobalAIAURLs(sc.Context, sc.Storage) - if err != nil { - return nil, err - } + if urls == nil || (len(urls.IssuingCertificates) == 0 && len(urls.CRLDistributionPoints) == 0 && len(urls.OCSPServers) == 0) { + urls, err = getGlobalAIAURLs(sc.Context, sc.Storage) } - if entries == nil { - return &certutil.URLEntries{}, nil - } - - return entries.toURLEntries(sc, i.ID) + return urls, err } func (sc *storageContext) listIssuers() ([]issuerID, error) { @@ -930,8 +834,8 @@ func areCertificatesEqual(cert1 *x509.Certificate, cert2 *x509.Certificate) bool return bytes.Equal(cert1.Raw, cert2.Raw) } -func (sc *storageContext) _setInternalCRLConfig(mapping *internalCRLConfigEntry, path string) error { - json, err := logical.StorageEntryJSON(path, mapping) +func (sc *storageContext) setLocalCRLConfig(mapping *localCRLConfigEntry) error { + json, err := logical.StorageEntryJSON(storageLocalCRLConfig, mapping) if err != nil { return 
err } @@ -939,21 +843,13 @@ func (sc *storageContext) _setInternalCRLConfig(mapping *internalCRLConfigEntry, return sc.Storage.Put(sc.Context, json) } -func (sc *storageContext) setLocalCRLConfig(mapping *internalCRLConfigEntry) error { - return sc._setInternalCRLConfig(mapping, storageLocalCRLConfig) -} - -func (sc *storageContext) setUnifiedCRLConfig(mapping *internalCRLConfigEntry) error { - return sc._setInternalCRLConfig(mapping, storageUnifiedCRLConfig) -} - -func (sc *storageContext) _getInternalCRLConfig(path string) (*internalCRLConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, path) +func (sc *storageContext) getLocalCRLConfig() (*localCRLConfigEntry, error) { + entry, err := sc.Storage.Get(sc.Context, storageLocalCRLConfig) if err != nil { return nil, err } - mapping := &internalCRLConfigEntry{} + mapping := &localCRLConfigEntry{} if entry != nil { if err := entry.DecodeJSON(mapping); err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode cluster-local CRL configuration: %v", err)} @@ -993,14 +889,6 @@ func (sc *storageContext) _getInternalCRLConfig(path string) (*internalCRLConfig return mapping, nil } -func (sc *storageContext) getLocalCRLConfig() (*internalCRLConfigEntry, error) { - return sc._getInternalCRLConfig(storageLocalCRLConfig) -} - -func (sc *storageContext) getUnifiedCRLConfig() (*internalCRLConfigEntry, error) { - return sc._getInternalCRLConfig(storageUnifiedCRLConfig) -} - func (sc *storageContext) setKeysConfig(config *keyConfigEntry) error { json, err := logical.StorageEntryJSON(storageKeyConfig, config) if err != nil { @@ -1110,7 +998,7 @@ func (sc *storageContext) resolveIssuerReference(reference string) (issuerID, er return IssuerRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI issuer for reference: %v", reference)} } -func (sc *storageContext) resolveIssuerCRLPath(reference string, unified bool) (string, error) { +func (sc *storageContext) resolveIssuerCRLPath(reference 
string) (string, error) { if sc.Backend.useLegacyBundleCaStorage() { return legacyCRLPath, nil } @@ -1120,23 +1008,13 @@ func (sc *storageContext) resolveIssuerCRLPath(reference string, unified bool) ( return legacyCRLPath, err } - configPath := storageLocalCRLConfig - if unified { - configPath = storageUnifiedCRLConfig - } - - crlConfig, err := sc._getInternalCRLConfig(configPath) + crlConfig, err := sc.getLocalCRLConfig() if err != nil { return legacyCRLPath, err } if crlId, ok := crlConfig.IssuerIDCRLMap[issuer]; ok && len(crlId) > 0 { - path := fmt.Sprintf("crls/%v", crlId) - if unified { - path = unifiedCRLPathPrefix + path - } - - return path, nil + return fmt.Sprintf("crls/%v", crlId), nil } return legacyCRLPath, fmt.Errorf("unable to find CRL for issuer: id:%v/ref:%v", issuer, reference) @@ -1325,16 +1203,6 @@ func (sc *storageContext) getRevocationConfig() (*crlConfig, error) { result.Expiry = defaultCrlConfig.Expiry } - isLocalMount := sc.Backend.System().LocalMount() - if (!constants.IsEnterprise || isLocalMount) && (result.UnifiedCRLOnExistingPaths || result.UnifiedCRL || result.UseGlobalQueue) { - // An end user must have had Enterprise, enabled the unified config args and then downgraded to OSS. 
- sc.Backend.Logger().Warn("Not running Vault Enterprise or using a local mount, " + - "disabling unified_crl, unified_crl_on_existing_paths and cross_cluster_revocation config flags.") - result.UnifiedCRLOnExistingPaths = false - result.UnifiedCRL = false - result.UseGlobalQueue = false - } - return &result, nil } @@ -1354,10 +1222,6 @@ func (sc *storageContext) getAutoTidyConfig() (*tidyConfig, error) { return nil, err } - if result.IssuerSafetyBuffer == 0 { - result.IssuerSafetyBuffer = defaultTidyConfig.IssuerSafetyBuffer - } - return &result, nil } @@ -1367,83 +1231,5 @@ func (sc *storageContext) writeAutoTidyConfig(config *tidyConfig) error { return err } - err = sc.Storage.Put(sc.Context, entry) - if err != nil { - return err - } - - sc.Backend.publishCertCountMetrics.Store(config.PublishMetrics) - - // To Potentially Disable Certificate Counting - if config.MaintainCount == false { - certCountWasEnabled := sc.Backend.certCountEnabled.Swap(config.MaintainCount) - if certCountWasEnabled { - sc.Backend.certsCounted.Store(true) - sc.Backend.certCountError = "Cert Count is Disabled: enable via Tidy Config maintain_stored_certificate_counts" - sc.Backend.possibleDoubleCountedSerials = nil // This won't stop a list operation, but will stop an expensive clean-up during initialize - sc.Backend.possibleDoubleCountedRevokedSerials = nil // This won't stop a list operation, but will stop an expensive clean-up during initialize - sc.Backend.certCount.Store(0) - sc.Backend.revokedCertCount.Store(0) - } - } else { // To Potentially Enable Certificate Counting - if sc.Backend.certCountEnabled.Load() == false { - // We haven't written "re-enable certificate counts" outside the initialize function - // Any call derived call to do so is likely to time out on ~2 million certs - sc.Backend.certCountError = "Certificate Counting Has Not Been Initialized, re-initialize this mount" - } - } - - return nil -} - -func (sc *storageContext) listRevokedCerts() ([]string, error) { - 
list, err := sc.Storage.List(sc.Context, revokedPath) - if err != nil { - return nil, fmt.Errorf("failed listing revoked certs: %w", err) - } - - return list, err -} - -func (sc *storageContext) getClusterConfig() (*clusterConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, clusterConfigPath) - if err != nil { - return nil, err - } - - var result clusterConfigEntry - if entry == nil { - return &result, nil - } - - if err = entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -func (sc *storageContext) writeClusterConfig(config *clusterConfigEntry) error { - entry, err := logical.StorageEntryJSON(clusterConfigPath, config) - if err != nil { - return err - } - return sc.Storage.Put(sc.Context, entry) } - -func (sc *storageContext) fetchRevocationInfo(serial string) (*revocationInfo, error) { - var revInfo *revocationInfo - revEntry, err := fetchCertBySerial(sc, revokedPath, serial) - if err != nil { - return nil, err - } - if revEntry != nil { - err = revEntry.DecodeJSON(&revInfo) - if err != nil { - return nil, fmt.Errorf("error decoding existing revocation info: %w", err) - } - } - - return revInfo, nil -} diff --git a/builtin/logical/pki/storage_migrations.go b/builtin/logical/pki/storage_migrations.go index f4b9237266b76..9104e5c6f3afb 100644 --- a/builtin/logical/pki/storage_migrations.go +++ b/builtin/logical/pki/storage_migrations.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( diff --git a/builtin/logical/pki/storage_migrations_test.go b/builtin/logical/pki/storage_migrations_test.go index 754f3993d14b2..49217bcdf013d 100644 --- a/builtin/logical/pki/storage_migrations_test.go +++ b/builtin/logical/pki/storage_migrations_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -18,7 +15,7 @@ func Test_migrateStorageEmptyStorage(t *testing.T) { t.Parallel() startTime := time.Now() ctx := context.Background() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) // Reset the version the helper above set to 1. @@ -67,7 +64,7 @@ func Test_migrateStorageOnlyKey(t *testing.T) { t.Parallel() startTime := time.Now() ctx := context.Background() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) // Reset the version the helper above set to 1. @@ -149,7 +146,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { t.Parallel() startTime := time.Now() ctx := context.Background() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) // Reset the version the helper above set to 1. @@ -255,7 +252,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { func TestMigration_OnceChainRebuild(t *testing.T) { t.Parallel() ctx := context.Background() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) // Create a legacy CA bundle that we'll migrate to the new layout. We call @@ -365,7 +362,7 @@ func TestMigration_OnceChainRebuild(t *testing.T) { func TestExpectedOpsWork_PreMigration(t *testing.T) { t.Parallel() ctx := context.Background() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) // Reset the version the helper above set to 1. 
b.pkiStorageVersion.Store(0) require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") @@ -388,7 +385,7 @@ func TestExpectedOpsWork_PreMigration(t *testing.T) { MountPoint: "pki/", }) require.NoError(t, err, "error from creating role") - require.NotNil(t, resp, "got nil response object from creating role") + require.Nil(t, resp, "got non-nil response object from creating role") // List roles resp, err = b.HandleRequest(context.Background(), &logical.Request{ @@ -474,7 +471,7 @@ func TestExpectedOpsWork_PreMigration(t *testing.T) { MountPoint: "pki/", }) require.NoError(t, err, "error setting CRL config") - require.NotNil(t, resp, "got nil response setting CRL config") + require.Nil(t, resp, "got non-nil response setting CRL config") // Set URL config resp, err = b.HandleRequest(context.Background(), &logical.Request{ @@ -486,7 +483,8 @@ func TestExpectedOpsWork_PreMigration(t *testing.T) { }, MountPoint: "pki/", }) - requireSuccessNonNilResponse(t, resp, err) + require.NoError(t, err, "error setting URL config") + require.Nil(t, resp, "got non-nil response setting URL config") // Make sure we can fetch the old values... for _, path := range []string{"ca/pem", "ca_chain", "cert/" + serialNum, "cert/ca", "cert/crl", "cert/ca_chain", "config/crl", "config/urls"} { @@ -593,190 +591,6 @@ func TestExpectedOpsWork_PreMigration(t *testing.T) { requireFailInMigration(t, b, s, logical.ReadOperation, "config/keys") } -func TestBackupBundle(t *testing.T) { - t.Parallel() - ctx := context.Background() - b, s := CreateBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) - - // Reset the version the helper above set to 1. - b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") - - // Create an empty request and tidy configuration for us. 
- req := &logical.Request{ - Storage: s, - MountPoint: "pki/", - } - cfg := &tidyConfig{ - BackupBundle: true, - IssuerSafetyBuffer: 120 * time.Second, - } - - // Migration should do nothing if we're on an empty mount. - err := b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) - require.NoError(t, err) - requireFileNotExists(t, sc, legacyCertBundlePath) - requireFileNotExists(t, sc, legacyCertBundleBackupPath) - issuerIds, err := sc.listIssuers() - require.NoError(t, err) - require.Empty(t, issuerIds) - keyIds, err := sc.listKeys() - require.NoError(t, err) - require.Empty(t, keyIds) - - // Create a legacy CA bundle and write it out. - bundle := genCertBundle(t, b, s) - json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) - require.NoError(t, err) - err = s.Put(ctx, json) - require.NoError(t, err) - legacyContents := requireFileExists(t, sc, legacyCertBundlePath, nil) - - // Doing another tidy should maintain the status quo since we've - // still not done our migration. - err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) - require.NoError(t, err) - requireFileExists(t, sc, legacyCertBundlePath, legacyContents) - requireFileNotExists(t, sc, legacyCertBundleBackupPath) - issuerIds, err = sc.listIssuers() - require.NoError(t, err) - require.Empty(t, issuerIds) - keyIds, err = sc.listKeys() - require.NoError(t, err) - require.Empty(t, keyIds) - - // Do a migration; this should provision an issuer and key. - initReq := &logical.InitializationRequest{Storage: s} - err = b.initialize(ctx, initReq) - require.NoError(t, err) - requireFileExists(t, sc, legacyCertBundlePath, legacyContents) - issuerIds, err = sc.listIssuers() - require.NoError(t, err) - require.NotEmpty(t, issuerIds) - keyIds, err = sc.listKeys() - require.NoError(t, err) - require.NotEmpty(t, keyIds) - - // Doing another tidy should maintain the status quo since we've - // done our migration too recently relative to the safety buffer. 
- err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) - require.NoError(t, err) - requireFileExists(t, sc, legacyCertBundlePath, legacyContents) - requireFileNotExists(t, sc, legacyCertBundleBackupPath) - issuerIds, err = sc.listIssuers() - require.NoError(t, err) - require.NotEmpty(t, issuerIds) - keyIds, err = sc.listKeys() - require.NoError(t, err) - require.NotEmpty(t, keyIds) - - // Shortening our buffer should ensure the migration occurs, removing - // the legacy bundle but creating the backup one. - time.Sleep(2 * time.Second) - cfg.IssuerSafetyBuffer = 1 * time.Second - err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) - require.NoError(t, err) - requireFileNotExists(t, sc, legacyCertBundlePath) - requireFileExists(t, sc, legacyCertBundleBackupPath, legacyContents) - issuerIds, err = sc.listIssuers() - require.NoError(t, err) - require.NotEmpty(t, issuerIds) - keyIds, err = sc.listKeys() - require.NoError(t, err) - require.NotEmpty(t, keyIds) - - // A new initialization should do nothing. - err = b.initialize(ctx, initReq) - require.NoError(t, err) - requireFileNotExists(t, sc, legacyCertBundlePath) - requireFileExists(t, sc, legacyCertBundleBackupPath, legacyContents) - issuerIds, err = sc.listIssuers() - require.NoError(t, err) - require.NotEmpty(t, issuerIds) - require.Equal(t, len(issuerIds), 1) - keyIds, err = sc.listKeys() - require.NoError(t, err) - require.NotEmpty(t, keyIds) - require.Equal(t, len(keyIds), 1) - - // Restoring the legacy bundles with new issuers should redo the - // migration. 
- newBundle := genCertBundle(t, b, s) - json, err = logical.StorageEntryJSON(legacyCertBundlePath, newBundle) - require.NoError(t, err) - err = s.Put(ctx, json) - require.NoError(t, err) - newLegacyContents := requireFileExists(t, sc, legacyCertBundlePath, nil) - - // -> reinit - err = b.initialize(ctx, initReq) - require.NoError(t, err) - requireFileExists(t, sc, legacyCertBundlePath, newLegacyContents) - requireFileExists(t, sc, legacyCertBundleBackupPath, legacyContents) - issuerIds, err = sc.listIssuers() - require.NoError(t, err) - require.NotEmpty(t, issuerIds) - require.Equal(t, len(issuerIds), 2) - keyIds, err = sc.listKeys() - require.NoError(t, err) - require.NotEmpty(t, keyIds) - require.Equal(t, len(keyIds), 2) - - // -> when we tidy again, we'll overwrite the old backup with the new - // one. - time.Sleep(2 * time.Second) - err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) - require.NoError(t, err) - requireFileNotExists(t, sc, legacyCertBundlePath) - requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) - issuerIds, err = sc.listIssuers() - require.NoError(t, err) - require.NotEmpty(t, issuerIds) - keyIds, err = sc.listKeys() - require.NoError(t, err) - require.NotEmpty(t, keyIds) - - // Finally, restoring the legacy bundle and re-migrating should redo - // the migration. 
- err = s.Put(ctx, json) - require.NoError(t, err) - requireFileExists(t, sc, legacyCertBundlePath, newLegacyContents) - requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) - - // -> overwrite the version and re-migrate - logEntry, err := getLegacyBundleMigrationLog(ctx, s) - require.NoError(t, err) - logEntry.MigrationVersion = 0 - err = setLegacyBundleMigrationLog(ctx, s, logEntry) - require.NoError(t, err) - err = b.initialize(ctx, initReq) - require.NoError(t, err) - requireFileExists(t, sc, legacyCertBundlePath, newLegacyContents) - requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) - issuerIds, err = sc.listIssuers() - require.NoError(t, err) - require.NotEmpty(t, issuerIds) - require.Equal(t, len(issuerIds), 2) - keyIds, err = sc.listKeys() - require.NoError(t, err) - require.NotEmpty(t, keyIds) - require.Equal(t, len(keyIds), 2) - - // -> Re-tidy should remove the legacy one. - time.Sleep(2 * time.Second) - err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) - require.NoError(t, err) - requireFileNotExists(t, sc, legacyCertBundlePath) - requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) - issuerIds, err = sc.listIssuers() - require.NoError(t, err) - require.NotEmpty(t, issuerIds) - keyIds, err = sc.listKeys() - require.NoError(t, err) - require.NotEmpty(t, keyIds) -} - // requireFailInMigration validate that we fail the operation with the appropriate error message to the end-user func requireFailInMigration(t *testing.T, b *backend, s logical.Storage, operation logical.Operation, path string) { resp, err := b.HandleRequest(context.Background(), &logical.Request{ @@ -792,31 +606,6 @@ func requireFailInMigration(t *testing.T, b *backend, s logical.Storage, operati "error message did not contain migration test for op:%s path:%s resp: %#v", operation, path, resp) } -func requireFileNotExists(t *testing.T, sc *storageContext, path string) { - t.Helper() - - entry, err := sc.Storage.Get(sc.Context, 
path) - require.NoError(t, err) - if entry != nil { - require.Empty(t, entry.Value) - } else { - require.Empty(t, entry) - } -} - -func requireFileExists(t *testing.T, sc *storageContext, path string, contents []byte) []byte { - t.Helper() - - entry, err := sc.Storage.Get(sc.Context, path) - require.NoError(t, err) - require.NotNil(t, entry) - require.NotEmpty(t, entry.Value) - if contents != nil { - require.Equal(t, entry.Value, contents) - } - return entry.Value -} - // Keys to simulate an intermediate CA mount with also-imported root (parent). const ( migIntPrivKey = `-----BEGIN RSA PRIVATE KEY----- diff --git a/builtin/logical/pki/storage_test.go b/builtin/logical/pki/storage_test.go index 625c046d00c86..17700576fda13 100644 --- a/builtin/logical/pki/storage_test.go +++ b/builtin/logical/pki/storage_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -18,7 +15,7 @@ var ctx = context.Background() func Test_ConfigsRoundTrip(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) // Create an empty key, issuer for testing. 
@@ -62,7 +59,7 @@ func Test_ConfigsRoundTrip(t *testing.T) { func Test_IssuerRoundTrip(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) issuer1, key1 := genIssuerAndKey(t, b, s) issuer2, key2 := genIssuerAndKey(t, b, s) @@ -108,7 +105,7 @@ func Test_IssuerRoundTrip(t *testing.T) { func Test_KeysIssuerImport(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) issuer1, key1 := genIssuerAndKey(t, b, s) @@ -177,7 +174,7 @@ func Test_KeysIssuerImport(t *testing.T) { func Test_IssuerUpgrade(t *testing.T) { t.Parallel() - b, s := CreateBackendWithStorage(t) + b, s := createBackendWithStorage(t) sc := b.makeStorageContext(ctx, s) // Make sure that we add OCSP signing to v0 issuers if CRLSigning is enabled diff --git a/builtin/logical/pki/storage_unified.go b/builtin/logical/pki/storage_unified.go deleted file mode 100644 index 28c656bb8bb65..0000000000000 --- a/builtin/logical/pki/storage_unified.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pki - -import ( - "fmt" - "strings" - "time" - - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - unifiedRevocationReadPathPrefix = "unified-revocation/" - unifiedRevocationWritePathPrefix = unifiedRevocationReadPathPrefix + "{{clusterId}}/" -) - -type unifiedRevocationEntry struct { - SerialNumber string `json:"-"` - CertExpiration time.Time `json:"certificate_expiration_utc"` - RevocationTimeUTC time.Time `json:"revocation_time_utc"` - CertificateIssuer issuerID `json:"issuer_id"` -} - -func getUnifiedRevocationBySerial(sc *storageContext, serial string) (*unifiedRevocationEntry, error) { - clusterPaths, err := lookupUnifiedClusterPaths(sc) - if err != nil { - return nil, err - } - - for _, path := range clusterPaths { - serialPath := path + serial - entryRaw, err := sc.Storage.Get(sc.Context, serialPath) - if err != nil { - return nil, err - } - - if entryRaw != nil { - var revEntry unifiedRevocationEntry - if err := entryRaw.DecodeJSON(&revEntry); err != nil { - return nil, fmt.Errorf("failed json decoding of unified entry at path %s: %w", serialPath, err) - } - revEntry.SerialNumber = serial - return &revEntry, nil - } - } - - return nil, nil -} - -func writeUnifiedRevocationEntry(sc *storageContext, ure *unifiedRevocationEntry) error { - json, err := logical.StorageEntryJSON(unifiedRevocationWritePathPrefix+normalizeSerial(ure.SerialNumber), ure) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) -} - -// listClusterSpecificUnifiedRevokedCerts returns a list of revoked certificates from a given cluster -func listClusterSpecificUnifiedRevokedCerts(sc *storageContext, clusterId string) ([]string, error) { - path := unifiedRevocationReadPathPrefix + clusterId + "/" - serials, err := sc.Storage.List(sc.Context, path) - if err != nil { - return nil, err - } - - return serials, nil -} - -// lookupUnifiedClusterPaths returns a map of cluster id to the prefix storage path for that 
given cluster's -// unified revoked certificates -func lookupUnifiedClusterPaths(sc *storageContext) (map[string]string, error) { - fullPaths := map[string]string{} - - clusterPaths, err := sc.Storage.List(sc.Context, unifiedRevocationReadPathPrefix) - if err != nil { - return nil, err - } - - for _, clusterIdWithSlash := range clusterPaths { - // Only include folder listings, if a file were to be stored under this path ignore it. - if strings.HasSuffix(clusterIdWithSlash, "/") { - clusterId := clusterIdWithSlash[:len(clusterIdWithSlash)-1] // remove trailing / - fullPaths[clusterId] = unifiedRevocationReadPathPrefix + clusterIdWithSlash - } - } - - return fullPaths, nil -} diff --git a/builtin/logical/pki/test_helpers.go b/builtin/logical/pki/test_helpers.go index 0a003438ba8a2..acf083cede614 100644 --- a/builtin/logical/pki/test_helpers.go +++ b/builtin/logical/pki/test_helpers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -10,15 +7,11 @@ import ( "crypto/rsa" "crypto/x509" "crypto/x509/pkix" - "encoding/asn1" "encoding/pem" "fmt" "io" - "math" - "math/big" "strings" "testing" - "time" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/helper/certutil" @@ -27,9 +20,7 @@ import ( ) // Setup helpers -func CreateBackendWithStorage(t testing.TB) (*backend, logical.Storage) { - t.Helper() - +func createBackendWithStorage(t testing.TB) (*backend, logical.Storage) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} @@ -45,8 +36,6 @@ func CreateBackendWithStorage(t testing.TB) (*backend, logical.Storage) { } func mountPKIEndpoint(t testing.TB, client *api.Client, path string) { - t.Helper() - err := client.Sys().Mount(path, &api.MountInput{ Type: "pki", Config: api.MountConfigInput{ @@ -59,32 +48,13 @@ func mountPKIEndpoint(t testing.TB, client *api.Client, path string) { // Signing helpers func requireSignedBy(t *testing.T, cert *x509.Certificate, 
signingCert *x509.Certificate) { - t.Helper() - if err := cert.CheckSignatureFrom(signingCert); err != nil { t.Fatalf("signature verification failed: %v", err) } } -func requireSignedByAtPath(t *testing.T, client *api.Client, leaf *x509.Certificate, path string) { - t.Helper() - - resp, err := client.Logical().Read(path) - require.NoError(t, err, "got unexpected error fetching parent certificate") - require.NotNil(t, resp, "missing response when fetching parent certificate") - require.NotNil(t, resp.Data, "missing data from parent certificate response") - require.NotNil(t, resp.Data["certificate"], "missing certificate field on parent read response") - - parentCert := resp.Data["certificate"].(string) - parent := parseCert(t, parentCert) - - requireSignedBy(t, leaf, parent) -} - // Certificate helper func parseCert(t *testing.T, pemCert string) *x509.Certificate { - t.Helper() - block, _ := pem.Decode([]byte(pemCert)) require.NotNil(t, block, "failed to decode PEM block") @@ -94,8 +64,6 @@ func parseCert(t *testing.T, pemCert string) *x509.Certificate { } func requireMatchingPublicKeys(t *testing.T, cert *x509.Certificate, key crypto.PublicKey) { - t.Helper() - certPubKey := cert.PublicKey areEqual, err := certutil.ComparePublicKeysAndType(certPubKey, key) require.NoError(t, err, "failed comparing public keys: %#v", err) @@ -121,25 +89,17 @@ func getSelfSigned(t *testing.T, subject, issuer *x509.Certificate, key *rsa.Pri // CRL related helpers func getCrlCertificateList(t *testing.T, client *api.Client, mountPoint string) pkix.TBSCertificateList { - t.Helper() - path := fmt.Sprintf("/v1/%s/crl", mountPoint) return getParsedCrlAtPath(t, client, path).TBSCertList } func parseCrlPemBytes(t *testing.T, crlPem []byte) pkix.TBSCertificateList { - t.Helper() - certList, err := x509.ParseCRL(crlPem) require.NoError(t, err) return certList.TBSCertList } func requireSerialNumberInCRL(t *testing.T, revokeList pkix.TBSCertificateList, serialNum string) bool { - if t != nil { - 
t.Helper() - } - serialsInList := make([]string, 0, len(revokeList.RevokedCertificates)) for _, revokeEntry := range revokeList.RevokedCertificates { formattedSerial := certutil.GetHexFormatted(revokeEntry.SerialNumber.Bytes(), ":") @@ -157,15 +117,11 @@ func requireSerialNumberInCRL(t *testing.T, revokeList pkix.TBSCertificateList, } func getParsedCrl(t *testing.T, client *api.Client, mountPoint string) *pkix.CertificateList { - t.Helper() - path := fmt.Sprintf("/v1/%s/crl", mountPoint) return getParsedCrlAtPath(t, client, path) } func getParsedCrlAtPath(t *testing.T, client *api.Client, path string) *pkix.CertificateList { - t.Helper() - req := client.NewRequest("GET", path) resp, err := client.RawRequest(req) if err != nil { @@ -189,8 +145,6 @@ func getParsedCrlAtPath(t *testing.T, client *api.Client, path string) *pkix.Cer } func getParsedCrlFromBackend(t *testing.T, b *backend, s logical.Storage, path string) *pkix.CertificateList { - t.Helper() - resp, err := CBRead(b, s, path) if err != nil { t.Fatal(err) @@ -226,10 +180,6 @@ func CBReq(b *backend, s logical.Storage, operation logical.Operation, path stri return resp, nil } -func CBHeader(b *backend, s logical.Storage, path string) (*logical.Response, error) { - return CBReq(b, s, logical.HeaderOperation, path, make(map[string]interface{})) -} - func CBRead(b *backend, s logical.Storage, path string) (*logical.Response, error) { return CBReq(b, s, logical.ReadOperation, path, make(map[string]interface{})) } @@ -251,8 +201,6 @@ func CBDelete(b *backend, s logical.Storage, path string) (*logical.Response, er } func requireFieldsSetInResp(t *testing.T, resp *logical.Response, fields ...string) { - t.Helper() - var missingFields []string for _, field := range fields { value, ok := resp.Data[field] @@ -265,131 +213,13 @@ func requireFieldsSetInResp(t *testing.T, resp *logical.Response, fields ...stri } func requireSuccessNonNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { 
- t.Helper() - require.NoError(t, err, msgAndArgs...) - if resp.IsError() { - errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) - require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) - } + require.False(t, resp.IsError(), msgAndArgs...) require.NotNil(t, resp, msgAndArgs...) } func requireSuccessNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { - t.Helper() - require.NoError(t, err, msgAndArgs...) - if resp.IsError() { - errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) - require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) - } - if resp != nil { - msg := fmt.Sprintf("expected nil response but got: %v", resp) - require.Nilf(t, resp, msg, msgAndArgs...) - } -} - -func getCRLNumber(t *testing.T, crl pkix.TBSCertificateList) int { - t.Helper() - - for _, extension := range crl.Extensions { - if extension.Id.Equal(certutil.CRLNumberOID) { - bigInt := new(big.Int) - leftOver, err := asn1.Unmarshal(extension.Value, &bigInt) - require.NoError(t, err, "Failed unmarshalling crl number extension") - require.Empty(t, leftOver, "leftover bytes from unmarshalling crl number extension") - require.True(t, bigInt.IsInt64(), "parsed crl number integer is not an int64") - require.False(t, math.MaxInt <= bigInt.Int64(), "parsed crl number integer can not fit in an int") - return int(bigInt.Int64()) - } - } - - t.Fatalf("failed to find crl number extension") - return 0 -} - -func getCrlReferenceFromDelta(t *testing.T, crl pkix.TBSCertificateList) int { - t.Helper() - - for _, extension := range crl.Extensions { - if extension.Id.Equal(certutil.DeltaCRLIndicatorOID) { - bigInt := new(big.Int) - leftOver, err := asn1.Unmarshal(extension.Value, &bigInt) - require.NoError(t, err, "Failed unmarshalling delta crl indicator extension") - require.Empty(t, leftOver, "leftover bytes from unmarshalling delta crl indicator extension") - require.True(t, 
bigInt.IsInt64(), "parsed delta crl integer is not an int64") - require.False(t, math.MaxInt <= bigInt.Int64(), "parsed delta crl integer can not fit in an int") - return int(bigInt.Int64()) - } - } - - t.Fatalf("failed to find delta crl indicator extension") - return 0 -} - -// waitForUpdatedCrl will wait until the CRL at the provided path has been reloaded -// up for a maxWait duration and gives up if the timeout has been reached. If a negative -// value for lastSeenCRLNumber is provided, the method will load the current CRL and wait -// for a newer CRL be generated. -func waitForUpdatedCrl(t *testing.T, client *api.Client, crlPath string, lastSeenCRLNumber int, maxWait time.Duration) pkix.TBSCertificateList { - t.Helper() - - newCrl, didTimeOut := waitForUpdatedCrlUntil(t, client, crlPath, lastSeenCRLNumber, maxWait) - if didTimeOut { - t.Fatalf("Timed out waiting for new CRL rebuild on path %s", crlPath) - } - return newCrl.TBSCertList -} - -// waitForUpdatedCrlUntil is a helper method that will wait for a CRL to be updated up until maxWait duration -// or give up and return the last CRL it loaded. It will not fail, if it does not see a new CRL within the -// max duration unlike waitForUpdatedCrl. Returns the last loaded CRL at the provided path and a boolean -// indicating if we hit maxWait duration or not. 
-func waitForUpdatedCrlUntil(t *testing.T, client *api.Client, crlPath string, lastSeenCrlNumber int, maxWait time.Duration) (*pkix.CertificateList, bool) { - t.Helper() - - crl := getParsedCrlAtPath(t, client, crlPath) - initialCrlRevision := getCRLNumber(t, crl.TBSCertList) - newCrlRevision := initialCrlRevision - - // Short circuit the fetches if we have a version of the CRL we want - if lastSeenCrlNumber > 0 && getCRLNumber(t, crl.TBSCertList) > lastSeenCrlNumber { - return crl, false - } - - start := time.Now() - iteration := 0 - for { - iteration++ - - if time.Since(start) > maxWait { - t.Logf("Timed out waiting for new CRL on path %s after iteration %d, delay: %v", - crlPath, iteration, time.Now().Sub(start)) - return crl, true - } - - crl = getParsedCrlAtPath(t, client, crlPath) - newCrlRevision = getCRLNumber(t, crl.TBSCertList) - if newCrlRevision > initialCrlRevision { - t.Logf("Got new revision of CRL %s from %d to %d after iteration %d, delay %v", - crlPath, initialCrlRevision, newCrlRevision, iteration, time.Now().Sub(start)) - return crl, false - } - - time.Sleep(100 * time.Millisecond) - } -} - -// A quick CRL to string to provide better test error messages -func summarizeCrl(t *testing.T, crl pkix.TBSCertificateList) string { - version := getCRLNumber(t, crl) - serials := []string{} - for _, cert := range crl.RevokedCertificates { - serials = append(serials, normalizeSerialFromBigInt(cert.SerialNumber)) - } - return fmt.Sprintf("CRL Version: %d\n"+ - "This Update: %s\n"+ - "Next Update: %s\n"+ - "Revoked Serial Count: %d\n"+ - "Revoked Serials: %v", version, crl.ThisUpdate, crl.NextUpdate, len(serials), serials) + require.False(t, resp.IsError(), msgAndArgs...) + require.Nil(t, resp, msgAndArgs...) } diff --git a/builtin/logical/pki/util.go b/builtin/logical/pki/util.go index d90e055e6cbcb..7d5a50faddbc6 100644 --- a/builtin/logical/pki/util.go +++ b/builtin/logical/pki/util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pki import ( @@ -11,7 +8,6 @@ import ( "net/http" "regexp" "strings" - "sync" "time" "github.com/hashicorp/vault/sdk/framework" @@ -46,10 +42,6 @@ func serialFromBigInt(serial *big.Int) string { return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) } -func normalizeSerialFromBigInt(serial *big.Int) string { - return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), "-")) -} - func normalizeSerial(serial string) string { return strings.ReplaceAll(strings.ToLower(serial), ":", "-") } @@ -58,12 +50,6 @@ func denormalizeSerial(serial string) string { return strings.ReplaceAll(strings.ToLower(serial), "-", ":") } -func serialToBigInt(serial string) (*big.Int, bool) { - norm := normalizeSerial(serial) - hex := strings.ReplaceAll(norm, "-", "") - return big.NewInt(0).SetString(hex, 16) -} - func kmsRequested(input *inputBundle) bool { return kmsRequestedFromFieldData(input.apiData) } @@ -266,7 +252,7 @@ func parseIfNotModifiedSince(req *logical.Request) (time.Time, error) { headerTimeValue, err := time.Parse(time.RFC1123, headerValue[0]) if err != nil { - return headerTimeValue, fmt.Errorf("failed to parse given value for '%s' header: %w", headerIfModifiedSince, err) + return headerTimeValue, fmt.Errorf("failed to parse given value for '%s' header: %v", headerIfModifiedSince, err) } return headerTimeValue, nil @@ -275,12 +261,10 @@ func parseIfNotModifiedSince(req *logical.Request) (time.Time, error) { type ifModifiedReqType int const ( - ifModifiedUnknown ifModifiedReqType = iota - ifModifiedCA = iota - ifModifiedCRL = iota - ifModifiedDeltaCRL = iota - ifModifiedUnifiedCRL = iota - ifModifiedUnifiedDeltaCRL = iota + ifModifiedUnknown ifModifiedReqType = iota + ifModifiedCA = iota + ifModifiedCRL = iota + ifModifiedDeltaCRL = iota ) type IfModifiedSinceHelper struct { @@ -345,26 +329,6 @@ func (sc *storageContext) isIfModifiedSinceBeforeLastModified(helper *IfModified if helper.reqType == 
ifModifiedDeltaCRL { lastModified = crlConfig.DeltaLastModified } - case ifModifiedUnifiedCRL, ifModifiedUnifiedDeltaCRL: - if sc.Backend.crlBuilder.invalidate.Load() { - // When we see the CRL is invalidated, respond with false - // regardless of what the local CRL state says. We've likely - // renamed some issuers or are about to rebuild a new CRL.... - // - // We do this earlier, ahead of config load, as it saves us a - // potential error condition. - return false, nil - } - - crlConfig, err := sc.getUnifiedCRLConfig() - if err != nil { - return false, err - } - - lastModified = crlConfig.LastModified - if helper.reqType == ifModifiedUnifiedDeltaCRL { - lastModified = crlConfig.DeltaLastModified - } case ifModifiedCA: issuerId, err := sc.resolveIssuerReference(string(helper.issuerRef)) if err != nil { @@ -395,109 +359,3 @@ func addWarnings(resp *logical.Response, warnings []string) *logical.Response { } return resp } - -// revocationQueue is a type for allowing invalidateFunc to continue operating -// quickly, while letting periodicFunc slowly sort through all open -// revocations to process. In particular, we do not wish to be holding this -// lock while periodicFunc is running, so iteration returns a full copy of -// the data in this queue. We use a map from serial->[]clusterId, allowing us -// to quickly insert and remove items, without using a slice of tuples. One -// serial might be present on two clusters, if two clusters both have the cert -// stored locally (e.g., via BYOC), which would result in two confirmation -// entries and thus dictating the need for []clusterId. This also lets us -// avoid having duplicate entries. 
-type revocationQueue struct { - _l sync.Mutex - queue map[string][]string -} - -func newRevocationQueue() *revocationQueue { - return &revocationQueue{ - queue: make(map[string][]string), - } -} - -func (q *revocationQueue) Add(items ...*revocationQueueEntry) { - q._l.Lock() - defer q._l.Unlock() - - for _, item := range items { - var found bool - for _, cluster := range q.queue[item.Serial] { - if cluster == item.Cluster { - found = true - break - } - } - - if !found { - q.queue[item.Serial] = append(q.queue[item.Serial], item.Cluster) - } - } -} - -func (q *revocationQueue) Remove(item *revocationQueueEntry) { - q._l.Lock() - defer q._l.Unlock() - - clusters, present := q.queue[item.Serial] - if !present { - return - } - - if len(clusters) == 0 || (len(clusters) == 1 && clusters[0] == item.Cluster) { - delete(q.queue, item.Serial) - return - } - - result := clusters - for index, cluster := range clusters { - if cluster == item.Cluster { - result = append(clusters[0:index], clusters[index+1:]...) - break - } - } - - q.queue[item.Serial] = result -} - -// As this doesn't depend on any internal state, it should not be called -// unless it is OK to remove any items added since the last Iterate() -// function call. -func (q *revocationQueue) RemoveAll() { - q._l.Lock() - defer q._l.Unlock() - - q.queue = make(map[string][]string) -} - -func (q *revocationQueue) Iterate() []*revocationQueueEntry { - q._l.Lock() - defer q._l.Unlock() - - // Heuristic: by storing by serial, occasionally we'll get double entires - // if it was already revoked, but otherwise we'll be off by fewer when - // building this list. - ret := make([]*revocationQueueEntry, 0, len(q.queue)) - - for serial, clusters := range q.queue { - for _, cluster := range clusters { - ret = append(ret, &revocationQueueEntry{ - Serial: serial, - Cluster: cluster, - }) - } - } - - return ret -} - -// sliceToMapKey return a map that who's keys are entries in a map. 
-func sliceToMapKey(s []string) map[string]struct{} { - var empty struct{} - myMap := make(map[string]struct{}, len(s)) - for _, s := range s { - myMap[s] = empty - } - return myMap -} diff --git a/builtin/logical/pkiext/README.md b/builtin/logical/pkiext/README.md deleted file mode 100644 index 40364e085c594..0000000000000 --- a/builtin/logical/pkiext/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# What is `pkiext`? - -`pkiext` exists to split the Docker tests into a separate package from the -main PKI tests. Because the Docker tests execute in a smaller runner with -fewer resources, and we were hitting timeouts waiting for the entire PKI -test suite to run, we need to split the larger non-Docker PKI tests from -the smaller Docker tests, to ensure the former can execute. - -This package should lack any non-test related targets. diff --git a/builtin/logical/pkiext/nginx_test.go b/builtin/logical/pkiext/nginx_test.go deleted file mode 100644 index 1532f3e81f6ca..0000000000000 --- a/builtin/logical/pkiext/nginx_test.go +++ /dev/null @@ -1,638 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pkiext - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/builtin/logical/pki" - "github.com/hashicorp/vault/sdk/helper/docker" - "github.com/stretchr/testify/require" -) - -var ( - cwRunner *docker.Runner - builtNetwork string - buildClientContainerOnce sync.Once -) - -const ( - protectedFile = `dadgarcorp-internal-protected` - unprotectedFile = `hello-world` - failureIndicator = `THIS-TEST-SHOULD-FAIL` - uniqueHostname = `dadgarcorpvaultpkitestingnginxwgetcurlcontainersexample.com` - containerName = `vault_pki_nginx_integration` -) - -func buildNginxContainer(t *testing.T, root string, crl string, chain string, private string) (func(), string, int, string, string, int) { - containerfile := ` -FROM nginx:latest - -RUN mkdir /www /etc/nginx/ssl && rm /etc/nginx/conf.d/*.conf - -COPY testing.conf /etc/nginx/conf.d/ -COPY root.pem /etc/nginx/ssl/root.pem -COPY fullchain.pem /etc/nginx/ssl/fullchain.pem -COPY privkey.pem /etc/nginx/ssl/privkey.pem -COPY crl.pem /etc/nginx/ssl/crl.pem -COPY /data /www/data -` - - siteConfig := ` -server { - listen 80; - listen [::]:80; - - location / { - return 301 $request_uri; - } -} - -server { - listen 443 ssl; - listen [::]:443 ssl; - - ssl_certificate /etc/nginx/ssl/fullchain.pem; - ssl_certificate_key /etc/nginx/ssl/privkey.pem; - - ssl_client_certificate /etc/nginx/ssl/root.pem; - ssl_crl /etc/nginx/ssl/crl.pem; - ssl_verify_client optional; - - # Magic per: https://serverfault.com/questions/891603/nginx-reverse-proxy-with-optional-ssl-client-authentication - # Only necessary since we're too lazy to setup two different subdomains. 
- set $ssl_status 'open'; - if ($request_uri ~ protected) { - set $ssl_status 'closed'; - } - - if ($ssl_client_verify != SUCCESS) { - set $ssl_status "$ssl_status-fail"; - } - - if ($ssl_status = "closed-fail") { - return 403; - } - - location / { - root /www/data; - } -} -` - - bCtx := docker.NewBuildContext() - bCtx["testing.conf"] = docker.PathContentsFromString(siteConfig) - bCtx["root.pem"] = docker.PathContentsFromString(root) - bCtx["fullchain.pem"] = docker.PathContentsFromString(chain) - bCtx["privkey.pem"] = docker.PathContentsFromString(private) - bCtx["crl.pem"] = docker.PathContentsFromString(crl) - bCtx["/data/index.html"] = docker.PathContentsFromString(unprotectedFile) - bCtx["/data/protected.html"] = docker.PathContentsFromString(protectedFile) - - imageName := "vault_pki_nginx_integration" - suffix, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("error generating unique suffix: %v", err) - } - imageTag := suffix - - runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: imageName, - ImageTag: imageTag, - ContainerName: containerName, - Ports: []string{"443/tcp"}, - LogConsumer: func(s string) { - if t.Failed() { - t.Logf("container logs: %s", s) - } - }, - }) - if err != nil { - t.Fatalf("Could not provision docker service runner: %s", err) - } - - ctx := context.Background() - output, err := runner.BuildImage(ctx, containerfile, bCtx, - docker.BuildRemove(true), docker.BuildForceRemove(true), - docker.BuildPullParent(true), - docker.BuildTags([]string{imageName + ":" + imageTag})) - if err != nil { - t.Fatalf("Could not build new image: %v", err) - } - - t.Logf("Image build output: %v", string(output)) - - svc, err := runner.StartService(ctx, func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { - // Nginx loads fast, we're too lazy to validate this properly. 
- time.Sleep(5 * time.Second) - return docker.NewServiceHostPort(host, port), nil - }) - if err != nil { - t.Fatalf("Could not start nginx container: %v", err) - } - - // We also need to find the network address of this node, and return - // the non-local address associated with it so that we can spawn the - // client command on the correct network/port. - networks, err := runner.GetNetworkAndAddresses(svc.Container.ID) - if err != nil { - t.Fatalf("Could not interrogate container for addresses: %v", err) - } - - var networkName string - var networkAddr string - for name, addr := range networks { - if addr == "" { - continue - } - - networkName = name - networkAddr = addr - break - } - - if networkName == "" || networkAddr == "" { - t.Fatalf("failed to get network info for containers: empty network address: %v", networks) - } - - pieces := strings.Split(svc.Config.Address(), ":") - port, _ := strconv.Atoi(pieces[1]) - return svc.Cleanup, pieces[0], port, networkName, networkAddr, 443 -} - -func buildWgetCurlContainer(t *testing.T, network string) { - containerfile := ` -FROM ubuntu:latest - -RUN apt update && DEBIAN_FRONTEND="noninteractive" apt install -y curl wget wget2 -` - - bCtx := docker.NewBuildContext() - - imageName := "vault_pki_wget_curl_integration" - imageTag := "latest" - - var err error - cwRunner, err = docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: imageName, - ImageTag: imageTag, - ContainerName: "vault_pki_wget_curl", - NetworkID: network, - // We want to run sleep in the background so we're not stuck waiting - // for the default ubuntu container's shell to prompt for input. 
- Entrypoint: []string{"sleep", "45"}, - LogConsumer: func(s string) { - if t.Failed() { - t.Logf("container logs: %s", s) - } - }, - }) - if err != nil { - t.Fatalf("Could not provision docker service runner: %s", err) - } - - ctx := context.Background() - output, err := cwRunner.BuildImage(ctx, containerfile, bCtx, - docker.BuildRemove(true), docker.BuildForceRemove(true), - docker.BuildPullParent(true), - docker.BuildTags([]string{imageName + ":" + imageTag})) - if err != nil { - t.Fatalf("Could not build new image: %v", err) - } - - t.Logf("Image build output: %v", string(output)) -} - -func CheckWithClients(t *testing.T, network string, address string, url string, rootCert string, certificate string, privatekey string) { - // We assume the network doesn't change once assigned. - buildClientContainerOnce.Do(func() { - buildWgetCurlContainer(t, network) - builtNetwork = network - }) - - if builtNetwork != network { - t.Fatalf("failed assumption check: different built network (%v) vs run network (%v); must've changed while running tests", builtNetwork, network) - } - - // Start our service with a random name to not conflict with other - // threads. - ctx := context.Background() - result, err := cwRunner.Start(ctx, true, false) - if err != nil { - t.Fatalf("Could not start golang container for wget/curl checks: %s", err) - } - - // Commands to run after potentially writing the certificate. We - // might augment these if the certificate exists. - // - // We manually add the expected hostname to the local hosts file - // to avoid resolving it over the network and instead resolving it - // to this other container we just started (potentially in parallel - // with other containers). 
- hostPrimeCmd := []string{"sh", "-c", "echo '" + address + " " + uniqueHostname + "' >> /etc/hosts"} - wgetCmd := []string{"wget", "--verbose", "--ca-certificate=/root.pem", url} - curlCmd := []string{"curl", "--verbose", "--cacert", "/root.pem", url} - - certCtx := docker.NewBuildContext() - certCtx["root.pem"] = docker.PathContentsFromString(rootCert) - if certificate != "" { - // Copy the cert into the newly running container. - certCtx["client-cert.pem"] = docker.PathContentsFromString(certificate) - certCtx["client-privkey.pem"] = docker.PathContentsFromString(privatekey) - - wgetCmd = []string{"wget", "--verbose", "--ca-certificate=/root.pem", "--certificate=/client-cert.pem", "--private-key=/client-privkey.pem", url} - curlCmd = []string{"curl", "--verbose", "--cacert", "/root.pem", "--cert", "/client-cert.pem", "--key", "/client-privkey.pem", url} - } - if err := cwRunner.CopyTo(result.Container.ID, "/", certCtx); err != nil { - t.Fatalf("Could not copy certificate and key into container: %v", err) - } - - for _, cmd := range [][]string{hostPrimeCmd, wgetCmd, curlCmd} { - t.Logf("Running client connection command: %v", cmd) - - stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) - if err != nil { - t.Fatalf("Could not run command (%v) in container: %v", cmd, err) - } - - if len(stderr) != 0 { - t.Logf("Got stderr from command (%v):\n%v\n", cmd, string(stderr)) - } - - if retcode != 0 { - t.Logf("Got stdout from command (%v):\n%v\n", cmd, string(stdout)) - t.Fatalf("Got unexpected non-zero retcode from command (%v): %v\n", cmd, retcode) - } - } -} - -func CheckDeltaCRL(t *testing.T, network string, address string, url string, rootCert string, crls string) { - // We assume the network doesn't change once assigned. 
- buildClientContainerOnce.Do(func() { - buildWgetCurlContainer(t, network) - builtNetwork = network - }) - - if builtNetwork != network { - t.Fatalf("failed assumption check: different built network (%v) vs run network (%v); must've changed while running tests", builtNetwork, network) - } - - // Start our service with a random name to not conflict with other - // threads. - ctx := context.Background() - result, err := cwRunner.Start(ctx, true, false) - if err != nil { - t.Fatalf("Could not start golang container for wget2 delta CRL checks: %s", err) - } - - // Commands to run after potentially writing the certificate. We - // might augment these if the certificate exists. - // - // We manually add the expected hostname to the local hosts file - // to avoid resolving it over the network and instead resolving it - // to this other container we just started (potentially in parallel - // with other containers). - hostPrimeCmd := []string{"sh", "-c", "echo '" + address + " " + uniqueHostname + "' >> /etc/hosts"} - wgetCmd := []string{"wget2", "--verbose", "--ca-certificate=/root.pem", "--crl-file=/crls.pem", url} - - certCtx := docker.NewBuildContext() - certCtx["root.pem"] = docker.PathContentsFromString(rootCert) - certCtx["crls.pem"] = docker.PathContentsFromString(crls) - if err := cwRunner.CopyTo(result.Container.ID, "/", certCtx); err != nil { - t.Fatalf("Could not copy certificate and key into container: %v", err) - } - - for index, cmd := range [][]string{hostPrimeCmd, wgetCmd} { - t.Logf("Running client connection command: %v", cmd) - - stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) - if err != nil { - t.Fatalf("Could not run command (%v) in container: %v", cmd, err) - } - - if len(stderr) != 0 { - t.Logf("Got stderr from command (%v):\n%v\n", cmd, string(stderr)) - } - - if retcode != 0 && index == 0 { - t.Logf("Got stdout from command (%v):\n%v\n", cmd, string(stdout)) - t.Fatalf("Got unexpected non-zero retcode from 
command (%v): %v\n", cmd, retcode) - } - - if retcode == 0 && index == 1 { - t.Logf("Got stdout from command (%v):\n%v\n", cmd, string(stdout)) - t.Fatalf("Got unexpected zero retcode from command; wanted this to fail (%v): %v\n", cmd, retcode) - } - } -} - -func CheckWithGo(t *testing.T, rootCert string, clientCert string, clientChain []string, clientKey string, host string, port int, networkAddr string, networkPort int, url string, expected string, shouldFail bool) { - // Ensure we can connect with Go. - pool := x509.NewCertPool() - pool.AppendCertsFromPEM([]byte(rootCert)) - tlsConfig := &tls.Config{ - RootCAs: pool, - } - - if clientCert != "" { - var clientTLSCert tls.Certificate - clientTLSCert.Certificate = append(clientTLSCert.Certificate, parseCert(t, clientCert).Raw) - clientTLSCert.PrivateKey = parseKey(t, clientKey) - for _, cert := range clientChain { - clientTLSCert.Certificate = append(clientTLSCert.Certificate, parseCert(t, cert).Raw) - } - - tlsConfig.Certificates = append(tlsConfig.Certificates, clientTLSCert) - } - - dialer := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - } - - transport := &http.Transport{ - TLSClientConfig: tlsConfig, - DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { - if addr == host+":"+strconv.Itoa(port) { - // If we can't resolve our hostname, try - // accessing it via the docker protocol - // instead of via the returned service - // address. 
- if _, err := net.LookupHost(host); err != nil && strings.Contains(err.Error(), "no such host") { - addr = networkAddr + ":" + strconv.Itoa(networkPort) - } - } - return dialer.DialContext(ctx, network, addr) - }, - } - - client := &http.Client{Transport: transport} - clientResp, err := client.Get(url) - if err != nil { - if shouldFail { - return - } - - t.Fatalf("failed to fetch url (%v): %v", url, err) - } else if shouldFail { - if clientResp.StatusCode == 200 { - t.Fatalf("expected failure to fetch url (%v): got response: %v", url, clientResp) - } - - return - } - - defer clientResp.Body.Close() - body, err := io.ReadAll(clientResp.Body) - if err != nil { - t.Fatalf("failed to get read response body: %v", err) - } - if !strings.Contains(string(body), expected) { - t.Fatalf("expected body to contain (%v) but was:\n%v", expected, string(body)) - } -} - -func RunNginxRootTest(t *testing.T, caKeyType string, caKeyBits int, caUsePSS bool, roleKeyType string, roleKeyBits int, roleUsePSS bool) { - t.Skipf("flaky in CI") - - b, s := pki.CreateBackendWithStorage(t) - - testSuffix := fmt.Sprintf(" - %v %v %v - %v %v %v", caKeyType, caKeyType, caUsePSS, roleKeyType, roleKeyBits, roleUsePSS) - - // Configure our mount to use auto-rotate, even though we don't have - // a periodic func. - _, err := pki.CBWrite(b, s, "config/crl", map[string]interface{}{ - "auto_rebuild": true, - "enable_delta": true, - }) - - // Create a root and intermediate, setting the intermediate as default. 
- resp, err := pki.CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "Root X1" + testSuffix, - "country": "US", - "organization": "Dadgarcorp", - "ou": "QA", - "key_type": caKeyType, - "key_bits": caKeyBits, - "use_pss": caUsePSS, - "issuer_name": "root", - }) - requireSuccessNonNilResponse(t, resp, err, "failed to create root cert") - rootCert := resp.Data["certificate"].(string) - resp, err = pki.CBWrite(b, s, "intermediate/generate/internal", map[string]interface{}{ - "common_name": "Intermediate I1" + testSuffix, - "country": "US", - "organization": "Dadgarcorp", - "ou": "QA", - "key_type": caKeyType, - "key_bits": caKeyBits, - "use_pss": caUsePSS, - }) - requireSuccessNonNilResponse(t, resp, err, "failed to create intermediate csr") - resp, err = pki.CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{ - "common_name": "Intermediate I1", - "country": "US", - "organization": "Dadgarcorp", - "ou": "QA", - "key_type": caKeyType, - "csr": resp.Data["csr"], - }) - requireSuccessNonNilResponse(t, resp, err, "failed to sign intermediate csr") - intCert := resp.Data["certificate"].(string) - resp, err = pki.CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": intCert, - }) - requireSuccessNonNilResponse(t, resp, err, "failed to sign intermediate csr") - _, err = pki.CBWrite(b, s, "config/issuers", map[string]interface{}{ - "default": resp.Data["imported_issuers"].([]string)[0], - }) - - // Create a role+certificate valid for localhost only. 
- _, err = pki.CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allow_any_name": true, - "key_type": roleKeyType, - "key_bits": roleKeyBits, - "use_pss": roleUsePSS, - "ttl": "60m", - }) - require.NoError(t, err) - resp, err = pki.CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": uniqueHostname, - "ip_sans": "127.0.0.1,::1", - "sans": uniqueHostname + ",localhost,localhost4,localhost6,localhost.localdomain", - }) - requireSuccessNonNilResponse(t, resp, err, "failed to create server leaf cert") - leafCert := resp.Data["certificate"].(string) - leafPrivateKey := resp.Data["private_key"].(string) + "\n" - fullChain := leafCert + "\n" - for _, cert := range resp.Data["ca_chain"].([]string) { - fullChain += cert + "\n" - } - - // Issue a client leaf certificate. - resp, err = pki.CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "testing.client.dadgarcorp.com", - }) - requireSuccessNonNilResponse(t, resp, err, "failed to create client leaf cert") - clientCert := resp.Data["certificate"].(string) - clientKey := resp.Data["private_key"].(string) + "\n" - clientWireChain := clientCert + "\n" + resp.Data["issuing_ca"].(string) + "\n" - clientTrustChain := resp.Data["issuing_ca"].(string) + "\n" + rootCert + "\n" - clientCAChain := resp.Data["ca_chain"].([]string) - - // Issue a client leaf cert and revoke it, placing it on the main CRL - // via rotation. 
- resp, err = pki.CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "revoked-crl.client.dadgarcorp.com", - }) - requireSuccessNonNilResponse(t, resp, err, "failed to create revoked client leaf cert") - revokedCert := resp.Data["certificate"].(string) - revokedKey := resp.Data["private_key"].(string) + "\n" - // revokedFullChain := revokedCert + "\n" + resp.Data["issuing_ca"].(string) + "\n" - // revokedTrustChain := resp.Data["issuing_ca"].(string) + "\n" + rootCert + "\n" - revokedCAChain := resp.Data["ca_chain"].([]string) - _, err = pki.CBWrite(b, s, "revoke", map[string]interface{}{ - "certificate": revokedCert, - }) - require.NoError(t, err) - _, err = pki.CBRead(b, s, "crl/rotate") - require.NoError(t, err) - - // Issue a client leaf cert and revoke it, placing it on the delta CRL - // via rotation. - /*resp, err = pki.CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "revoked-delta-crl.client.dadgarcorp.com", - }) - requireSuccessNonNilResponse(t, resp, err, "failed to create delta CRL revoked client leaf cert") - deltaCert := resp.Data["certificate"].(string) - deltaKey := resp.Data["private_key"].(string) + "\n" - //deltaFullChain := deltaCert + "\n" + resp.Data["issuing_ca"].(string) + "\n" - //deltaTrustChain := resp.Data["issuing_ca"].(string) + "\n" + rootCert + "\n" - deltaCAChain := resp.Data["ca_chain"].([]string) - _, err = pki.CBWrite(b, s, "revoke", map[string]interface{}{ - "certificate": deltaCert, - }) - require.NoError(t, err) - _, err = pki.CBRead(b, s, "crl/rotate-delta") - require.NoError(t, err)*/ - - // Get the CRL and Delta CRLs. - resp, err = pki.CBRead(b, s, "issuer/root/crl") - require.NoError(t, err) - rootCRL := resp.Data["crl"].(string) + "\n" - resp, err = pki.CBRead(b, s, "issuer/default/crl") - require.NoError(t, err) - intCRL := resp.Data["crl"].(string) + "\n" - - // No need to fetch root Delta CRL as we've not revoked anything on it. 
- resp, err = pki.CBRead(b, s, "issuer/default/crl/delta") - require.NoError(t, err) - deltaCRL := resp.Data["crl"].(string) + "\n" - - crls := rootCRL + intCRL + deltaCRL - - cleanup, host, port, networkName, networkAddr, networkPort := buildNginxContainer(t, rootCert, crls, fullChain, leafPrivateKey) - defer cleanup() - - if host != "127.0.0.1" && host != "::1" && strings.HasPrefix(host, containerName) { - t.Logf("Assuming %v:%v is a container name rather than localhost reference.", host, port) - host = uniqueHostname - port = networkPort - } - - localBase := "https://" + host + ":" + strconv.Itoa(port) - localURL := localBase + "/index.html" - localProtectedURL := localBase + "/protected.html" - containerBase := "https://" + uniqueHostname + ":" + strconv.Itoa(networkPort) - containerURL := containerBase + "/index.html" - containerProtectedURL := containerBase + "/protected.html" - - t.Logf("Spawned nginx container:\nhost: %v\nport: %v\nnetworkName: %v\nnetworkAddr: %v\nnetworkPort: %v\nlocalURL: %v\ncontainerURL: %v\n", host, port, networkName, networkAddr, networkPort, localBase, containerBase) - - // Ensure we can connect with Go. We do our checks for revocation here, - // as this behavior is server-controlled and shouldn't matter based on - // client type. - CheckWithGo(t, rootCert, "", nil, "", host, port, networkAddr, networkPort, localURL, unprotectedFile, false) - CheckWithGo(t, rootCert, "", nil, "", host, port, networkAddr, networkPort, localProtectedURL, failureIndicator, true) - CheckWithGo(t, rootCert, clientCert, clientCAChain, clientKey, host, port, networkAddr, networkPort, localProtectedURL, protectedFile, false) - CheckWithGo(t, rootCert, revokedCert, revokedCAChain, revokedKey, host, port, networkAddr, networkPort, localProtectedURL, protectedFile, true) - // CheckWithGo(t, rootCert, deltaCert, deltaCAChain, deltaKey, host, port, networkAddr, networkPort, localProtectedURL, protectedFile, true) - - // Ensure we can connect with wget/curl. 
- CheckWithClients(t, networkName, networkAddr, containerURL, rootCert, "", "") - CheckWithClients(t, networkName, networkAddr, containerProtectedURL, clientTrustChain, clientWireChain, clientKey) - - // Ensure OpenSSL will validate the delta CRL by revoking our server leaf - // and then using it with wget2. This will land on the intermediate's - // Delta CRL. - _, err = pki.CBWrite(b, s, "revoke", map[string]interface{}{ - "certificate": leafCert, - }) - require.NoError(t, err) - _, err = pki.CBRead(b, s, "crl/rotate-delta") - require.NoError(t, err) - resp, err = pki.CBRead(b, s, "issuer/default/crl/delta") - require.NoError(t, err) - deltaCRL = resp.Data["crl"].(string) + "\n" - crls = rootCRL + intCRL + deltaCRL - - CheckDeltaCRL(t, networkName, networkAddr, containerURL, rootCert, crls) -} - -func Test_NginxRSAPure(t *testing.T) { - t.Parallel() - RunNginxRootTest(t, "rsa", 2048, false, "rsa", 2048, false) -} - -func Test_NginxRSAPurePSS(t *testing.T) { - t.Parallel() - RunNginxRootTest(t, "rsa", 2048, false, "rsa", 2048, true) -} - -func Test_NginxRSAPSSPure(t *testing.T) { - t.Parallel() - RunNginxRootTest(t, "rsa", 2048, true, "rsa", 2048, false) -} - -func Test_NginxRSAPSSPurePSS(t *testing.T) { - t.Parallel() - RunNginxRootTest(t, "rsa", 2048, true, "rsa", 2048, true) -} - -func Test_NginxECDSA256Pure(t *testing.T) { - t.Parallel() - RunNginxRootTest(t, "ec", 256, false, "ec", 256, false) -} - -func Test_NginxECDSAHybrid(t *testing.T) { - t.Parallel() - RunNginxRootTest(t, "ec", 256, false, "rsa", 2048, false) -} - -func Test_NginxECDSAHybridPSS(t *testing.T) { - t.Parallel() - RunNginxRootTest(t, "ec", 256, false, "rsa", 2048, true) -} - -func Test_NginxRSAHybrid(t *testing.T) { - t.Parallel() - RunNginxRootTest(t, "rsa", 2048, false, "ec", 256, false) -} - -func Test_NginxRSAPSSHybrid(t *testing.T) { - t.Parallel() - RunNginxRootTest(t, "rsa", 2048, true, "ec", 256, false) -} diff --git a/builtin/logical/pkiext/pkiext_binary/acme_test.go 
b/builtin/logical/pkiext/pkiext_binary/acme_test.go deleted file mode 100644 index 5cdd146964cfb..0000000000000 --- a/builtin/logical/pkiext/pkiext_binary/acme_test.go +++ /dev/null @@ -1,937 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package pkiext_binary - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/hex" - "errors" - "fmt" - "net" - "net/http" - "path" - "testing" - "time" - - "golang.org/x/crypto/acme" - - "github.com/hashicorp/vault/builtin/logical/pkiext" - "github.com/hashicorp/vault/helper/testhelpers" - "github.com/hashicorp/vault/sdk/helper/certutil" - hDocker "github.com/hashicorp/vault/sdk/helper/docker" - "github.com/stretchr/testify/require" -) - -// Test_ACME will start a Vault cluster using the docker based binary, and execute -// a bunch of sub-tests against that cluster. It is up to each sub-test to run/configure -// a new pki mount within the cluster to not interfere with each other. -func Test_ACME(t *testing.T) { - cluster := NewVaultPkiClusterWithDNS(t) - defer cluster.Cleanup() - - tc := map[string]func(t *testing.T, cluster *VaultPkiCluster){ - "certbot": SubtestACMECertbot, - "certbot eab": SubtestACMECertbotEab, - "acme ip sans": SubtestACMEIPAndDNS, - "acme wildcard": SubtestACMEWildcardDNS, - "acme prevents ica": SubtestACMEPreventsICADNS, - } - - // Wrap the tests within an outer group, so that we run all tests - // in parallel, but still wait for all tests to finish before completing - // and running the cleanup of the Vault cluster. - t.Run("group", func(gt *testing.T) { - for testName := range tc { - // Trap the function to be embedded later in the run so it - // doesn't get clobbered on the next for iteration - testFunc := tc[testName] - - gt.Run(testName, func(st *testing.T) { - st.Parallel() - testFunc(st, cluster) - }) - } - }) - - // Do not run these tests in parallel. 
- t.Run("step down", func(gt *testing.T) { SubtestACMEStepDownNode(gt, cluster) }) -} - -func SubtestACMECertbot(t *testing.T, cluster *VaultPkiCluster) { - pki, err := cluster.CreateAcmeMount("pki") - require.NoError(t, err, "failed setting up acme mount") - - directory := "https://" + pki.GetActiveContainerIP() + ":8200/v1/pki/acme/directory" - vaultNetwork := pki.GetContainerNetworkName() - - logConsumer, logStdout, logStderr := getDockerLog(t) - - // Default to 45 second timeout, but bump to 120 when running locally or if nightly regression - // flag is provided. - sleepTimer := "45" - if testhelpers.IsLocalOrRegressionTests() { - sleepTimer = "120" - } - - t.Logf("creating on network: %v", vaultNetwork) - runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ - ImageRepo: "docker.mirror.hashicorp.services/certbot/certbot", - ImageTag: "latest", - ContainerName: "vault_pki_certbot_test", - NetworkName: vaultNetwork, - Entrypoint: []string{"sleep", sleepTimer}, - LogConsumer: logConsumer, - LogStdout: logStdout, - LogStderr: logStderr, - }) - require.NoError(t, err, "failed creating service runner") - - ctx := context.Background() - result, err := runner.Start(ctx, true, false) - require.NoError(t, err, "could not start container") - require.NotNil(t, result, "could not start container") - - defer runner.Stop(context.Background(), result.Container.ID) - - networks, err := runner.GetNetworkAndAddresses(result.Container.ID) - require.NoError(t, err, "could not read container's IP address") - require.Contains(t, networks, vaultNetwork, "expected to contain vault network") - - ipAddr := networks[vaultNetwork] - hostname := "certbot-acme-client.dadgarcorp.com" - - err = pki.AddHostname(hostname, ipAddr) - require.NoError(t, err, "failed to update vault host files") - - // Sinkhole a domain that's invalid just in case it's registered in the future. 
- cluster.Dns.AddDomain("armoncorp.com") - cluster.Dns.AddRecord("armoncorp.com", "A", "127.0.0.1") - - certbotCmd := []string{ - "certbot", - "certonly", - "--no-eff-email", - "--email", "certbot.client@dadgarcorp.com", - "--agree-tos", - "--no-verify-ssl", - "--standalone", - "--non-interactive", - "--server", directory, - "-d", hostname, - } - logCatCmd := []string{"cat", "/var/log/letsencrypt/letsencrypt.log"} - - stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, result.Container.ID, certbotCmd) - t.Logf("Certbot Issue Command: %v\nstdout: %v\nstderr: %v\n", certbotCmd, string(stdout), string(stderr)) - if err != nil || retcode != 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - require.NoError(t, err, "got error running issue command") - require.Equal(t, 0, retcode, "expected zero retcode issue command result") - - // N.B. We're using the `certonly` subcommand here because it seems as though the `renew` command - // attempts to install the cert for you. This ends up hanging and getting killed by docker, but is - // also not desired behavior. 
The certbot docs suggest using `certonly` to renew as seen here: - // https://eff-certbot.readthedocs.io/en/stable/using.html#renewing-certificates - certbotRenewCmd := []string{ - "certbot", - "certonly", - "--no-eff-email", - "--email", "certbot.client@dadgarcorp.com", - "--agree-tos", - "--no-verify-ssl", - "--standalone", - "--non-interactive", - "--server", directory, - "-d", hostname, - "--cert-name", hostname, - "--force-renewal", - } - - stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRenewCmd) - t.Logf("Certbot Renew Command: %v\nstdout: %v\nstderr: %v\n", certbotRenewCmd, string(stdout), string(stderr)) - if err != nil || retcode != 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - require.NoError(t, err, "got error running renew command") - require.Equal(t, 0, retcode, "expected zero retcode renew command result") - - certbotRevokeCmd := []string{ - "certbot", - "revoke", - "--no-eff-email", - "--email", "certbot.client@dadgarcorp.com", - "--agree-tos", - "--no-verify-ssl", - "--non-interactive", - "--no-delete-after-revoke", - "--cert-name", hostname, - } - - stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) - t.Logf("Certbot Revoke Command: %v\nstdout: %v\nstderr: %v\n", certbotRevokeCmd, string(stdout), string(stderr)) - if err != nil || retcode != 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - require.NoError(t, err, "got error running revoke command") - require.Equal(t, 0, retcode, "expected zero retcode revoke command result") - - // Revoking twice should fail. 
- stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) - t.Logf("Certbot Double Revoke Command: %v\nstdout: %v\nstderr: %v\n", certbotRevokeCmd, string(stdout), string(stderr)) - if err != nil || retcode == 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - - require.NoError(t, err, "got error running double revoke command") - require.NotEqual(t, 0, retcode, "expected non-zero retcode double revoke command result") - - // Attempt to issue against a domain that doesn't match the challenge. - // N.B. This test only runs locally or when the nightly regression env var is provided to CI. - if testhelpers.IsLocalOrRegressionTests() { - certbotInvalidIssueCmd := []string{ - "certbot", - "certonly", - "--no-eff-email", - "--email", "certbot.client@dadgarcorp.com", - "--agree-tos", - "--no-verify-ssl", - "--standalone", - "--non-interactive", - "--server", directory, - "-d", "armoncorp.com", - "--issuance-timeout", "10", - } - - stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotInvalidIssueCmd) - t.Logf("Certbot Invalid Issue Command: %v\nstdout: %v\nstderr: %v\n", certbotInvalidIssueCmd, string(stdout), string(stderr)) - if err != nil || retcode != 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - require.NoError(t, err, "got error running issue command") - require.NotEqual(t, 0, retcode, "expected non-zero retcode issue command result") - } - - // Attempt to close out our ACME account - certbotUnregisterCmd := []string{ - "certbot", - "unregister", - "--no-verify-ssl", - "--non-interactive", - "--server", directory, - } - - stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, 
certbotUnregisterCmd) - t.Logf("Certbot Unregister Command: %v\nstdout: %v\nstderr: %v\n", certbotUnregisterCmd, string(stdout), string(stderr)) - if err != nil || retcode != 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - require.NoError(t, err, "got error running unregister command") - require.Equal(t, 0, retcode, "expected zero retcode unregister command result") - - // Attempting to close out our ACME account twice should fail - stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotUnregisterCmd) - t.Logf("Certbot double Unregister Command: %v\nstdout: %v\nstderr: %v\n", certbotUnregisterCmd, string(stdout), string(stderr)) - if err != nil || retcode != 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot double logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - require.NoError(t, err, "got error running double unregister command") - require.Equal(t, 1, retcode, "expected non-zero retcode double unregister command result") -} - -func SubtestACMECertbotEab(t *testing.T, cluster *VaultPkiCluster) { - mountName := "pki-certbot-eab" - pki, err := cluster.CreateAcmeMount(mountName) - require.NoError(t, err, "failed setting up acme mount") - - err = pki.UpdateAcmeConfig(true, map[string]interface{}{ - "eab_policy": "new-account-required", - }) - require.NoError(t, err) - - eabId, base64EabKey, err := pki.GetEabKey("acme/") - - directory := "https://" + pki.GetActiveContainerIP() + ":8200/v1/" + mountName + "/acme/directory" - vaultNetwork := pki.GetContainerNetworkName() - - logConsumer, logStdout, logStderr := getDockerLog(t) - - t.Logf("creating on network: %v", vaultNetwork) - runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ - ImageRepo: 
"docker.mirror.hashicorp.services/certbot/certbot", - ImageTag: "latest", - ContainerName: "vault_pki_certbot_eab_test", - NetworkName: vaultNetwork, - Entrypoint: []string{"sleep", "45"}, - LogConsumer: logConsumer, - LogStdout: logStdout, - LogStderr: logStderr, - }) - require.NoError(t, err, "failed creating service runner") - - ctx := context.Background() - result, err := runner.Start(ctx, true, false) - require.NoError(t, err, "could not start container") - require.NotNil(t, result, "could not start container") - - defer runner.Stop(context.Background(), result.Container.ID) - - networks, err := runner.GetNetworkAndAddresses(result.Container.ID) - require.NoError(t, err, "could not read container's IP address") - require.Contains(t, networks, vaultNetwork, "expected to contain vault network") - - ipAddr := networks[vaultNetwork] - hostname := "certbot-eab-acme-client.dadgarcorp.com" - - err = pki.AddHostname(hostname, ipAddr) - require.NoError(t, err, "failed to update vault host files") - - certbotCmd := []string{ - "certbot", - "certonly", - "--no-eff-email", - "--email", "certbot.client@dadgarcorp.com", - "--eab-kid", eabId, - "--eab-hmac-key='" + base64EabKey + "'", - "--agree-tos", - "--no-verify-ssl", - "--standalone", - "--non-interactive", - "--server", directory, - "-d", hostname, - } - logCatCmd := []string{"cat", "/var/log/letsencrypt/letsencrypt.log"} - - stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, result.Container.ID, certbotCmd) - t.Logf("Certbot Issue Command: %v\nstdout: %v\nstderr: %v\n", certbotCmd, string(stdout), string(stderr)) - if err != nil || retcode != 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - require.NoError(t, err, "got error running issue command") - require.Equal(t, 0, retcode, "expected zero retcode issue command result") - - certbotRenewCmd := []string{ - 
"certbot", - "certonly", - "--no-eff-email", - "--email", "certbot.client@dadgarcorp.com", - "--agree-tos", - "--no-verify-ssl", - "--standalone", - "--non-interactive", - "--server", directory, - "-d", hostname, - "--cert-name", hostname, - "--force-renewal", - } - - stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRenewCmd) - t.Logf("Certbot Renew Command: %v\nstdout: %v\nstderr: %v\n", certbotRenewCmd, string(stdout), string(stderr)) - if err != nil || retcode != 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - require.NoError(t, err, "got error running renew command") - require.Equal(t, 0, retcode, "expected zero retcode renew command result") - - certbotRevokeCmd := []string{ - "certbot", - "revoke", - "--no-eff-email", - "--email", "certbot.client@dadgarcorp.com", - "--agree-tos", - "--no-verify-ssl", - "--non-interactive", - "--no-delete-after-revoke", - "--cert-name", hostname, - } - - stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) - t.Logf("Certbot Revoke Command: %v\nstdout: %v\nstderr: %v\n", certbotRevokeCmd, string(stdout), string(stderr)) - if err != nil || retcode != 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - require.NoError(t, err, "got error running revoke command") - require.Equal(t, 0, retcode, "expected zero retcode revoke command result") - - // Revoking twice should fail. 
- stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) - t.Logf("Certbot Double Revoke Command: %v\nstdout: %v\nstderr: %v\n", certbotRevokeCmd, string(stdout), string(stderr)) - if err != nil || retcode == 0 { - logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) - t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) - } - - require.NoError(t, err, "got error running double revoke command") - require.NotEqual(t, 0, retcode, "expected non-zero retcode double revoke command result") -} - -func SubtestACMEIPAndDNS(t *testing.T, cluster *VaultPkiCluster) { - pki, err := cluster.CreateAcmeMount("pki-ip-dns-sans") - require.NoError(t, err, "failed setting up acme mount") - - // Since we interact with ACME from outside the container network the ACME - // configuration needs to be updated to use the host port and not the internal - // docker ip. - basePath, err := pki.UpdateClusterConfigLocalAddr() - require.NoError(t, err, "failed updating cluster config") - - logConsumer, logStdout, logStderr := getDockerLog(t) - - // Setup an nginx container that we can have respond the queries for ips - runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ - ImageRepo: "docker.mirror.hashicorp.services/nginx", - ImageTag: "latest", - ContainerName: "vault_pki_ipsans_test", - NetworkName: pki.GetContainerNetworkName(), - LogConsumer: logConsumer, - LogStdout: logStdout, - LogStderr: logStderr, - }) - require.NoError(t, err, "failed creating service runner") - - ctx := context.Background() - result, err := runner.Start(ctx, true, false) - require.NoError(t, err, "could not start container") - require.NotNil(t, result, "could not start container") - - nginxContainerId := result.Container.ID - defer runner.Stop(context.Background(), nginxContainerId) - networks, err := runner.GetNetworkAndAddresses(nginxContainerId) - - challengeFolder := 
"/usr/share/nginx/html/.well-known/acme-challenge/" - createChallengeFolderCmd := []string{ - "sh", "-c", - "mkdir -p '" + challengeFolder + "'", - } - stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, nginxContainerId, createChallengeFolderCmd) - require.NoError(t, err, "failed to create folder in nginx container") - t.Logf("Update host file command: %v\nstdout: %v\nstderr: %v", createChallengeFolderCmd, string(stdout), string(stderr)) - require.Equal(t, 0, retcode, "expected zero retcode from mkdir in nginx container") - - ipAddr := networks[pki.GetContainerNetworkName()] - hostname := "go-lang-acme-client.dadgarcorp.com" - - err = pki.AddHostname(hostname, ipAddr) - require.NoError(t, err, "failed to update vault host files") - - // Perform an ACME lifecycle with an order that contains both an IP and a DNS name identifier - err = pki.UpdateRole("ip-dns-sans", map[string]interface{}{ - "key_type": "any", - "allowed_domains": "dadgarcorp.com", - "allow_subdomains": true, - "allow_wildcard_certificates": false, - }) - require.NoError(t, err, "failed creating role ip-dns-sans") - - directoryUrl := basePath + "/roles/ip-dns-sans/acme/directory" - acmeOrderIdentifiers := []acme.AuthzID{ - {Type: "ip", Value: ipAddr}, - {Type: "dns", Value: hostname}, - } - cr := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: hostname}, - DNSNames: []string{hostname}, - IPAddresses: []net.IP{net.ParseIP(ipAddr)}, - } - - provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge { - // For each http-01 challenge, generate the file to place underneath the nginx challenge folder - acmeCtx := hDocker.NewBuildContext() - var challengesToAccept []*acme.Challenge - for _, auth := range auths { - for _, challenge := range auth.Challenges { - if challenge.Status != acme.StatusPending { - t.Logf("ignoring challenge not in status pending: %v", challenge) - continue - } - - if challenge.Type == "http-01" { - challengeBody, err := 
acmeClient.HTTP01ChallengeResponse(challenge.Token) - require.NoError(t, err, "failed generating challenge response") - - challengePath := acmeClient.HTTP01ChallengePath(challenge.Token) - require.NoError(t, err, "failed generating challenge path") - - challengeFile := path.Base(challengePath) - - acmeCtx[challengeFile] = hDocker.PathContentsFromString(challengeBody) - - challengesToAccept = append(challengesToAccept, challenge) - } - } - } - - require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") - - // Copy all challenges within the nginx container - err = runner.CopyTo(nginxContainerId, challengeFolder, acmeCtx) - require.NoError(t, err, "failed copying challenges to container") - - return challengesToAccept - } - - acmeCert := doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") - - require.Len(t, acmeCert.IPAddresses, 1, "expected only a single ip address in cert") - require.Equal(t, ipAddr, acmeCert.IPAddresses[0].String()) - require.Equal(t, []string{hostname}, acmeCert.DNSNames) - require.Equal(t, hostname, acmeCert.Subject.CommonName) - - // Perform an ACME lifecycle with an order that contains just an IP identifier - err = pki.UpdateRole("ip-sans", map[string]interface{}{ - "key_type": "any", - "use_csr_common_name": false, - "require_cn": false, - "client_flag": false, - }) - require.NoError(t, err, "failed creating role ip-sans") - - directoryUrl = basePath + "/roles/ip-sans/acme/directory" - acmeOrderIdentifiers = []acme.AuthzID{ - {Type: "ip", Value: ipAddr}, - } - cr = &x509.CertificateRequest{ - IPAddresses: []net.IP{net.ParseIP(ipAddr)}, - } - - acmeCert = doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") - - require.Len(t, acmeCert.IPAddresses, 1, "expected only a single ip address in cert") - require.Equal(t, ipAddr, acmeCert.IPAddresses[0].String()) - require.Empty(t, acmeCert.DNSNames, "acme cert dns name field should 
have been empty") - require.Equal(t, "", acmeCert.Subject.CommonName) -} - -type acmeGoValidatorProvisionerFunc func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge - -func doAcmeValidationWithGoLibrary(t *testing.T, directoryUrl string, acmeOrderIdentifiers []acme.AuthzID, cr *x509.CertificateRequest, provisioningFunc acmeGoValidatorProvisionerFunc, expectedFailure string) *x509.Certificate { - // Since we are contacting Vault through the host ip/port, the certificate will not validate properly - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - httpClient := &http.Client{Transport: tr} - - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa account key") - - t.Logf("Using the following url for the ACME directory: %s", directoryUrl) - acmeClient := &acme.Client{ - Key: accountKey, - HTTPClient: httpClient, - DirectoryURL: directoryUrl, - } - - testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute) - defer cancelFunc() - - // Create new account - _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}}, - func(tosURL string) bool { return true }) - require.NoError(t, err, "failed registering account") - - // Create an ACME order - order, err := acmeClient.AuthorizeOrder(testCtx, acmeOrderIdentifiers) - require.NoError(t, err, "failed creating ACME order") - - var auths []*acme.Authorization - for _, authUrl := range order.AuthzURLs { - authorization, err := acmeClient.GetAuthorization(testCtx, authUrl) - require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) - auths = append(auths, authorization) - } - - // Handle the validation using the external validation mechanism. 
- challengesToAccept := provisioningFunc(acmeClient, auths) - require.NotEmpty(t, challengesToAccept, "provisioning function failed to return any challenges to accept") - - // Tell the ACME server, that they can now validate those challenges. - for _, challenge := range challengesToAccept { - _, err = acmeClient.Accept(testCtx, challenge) - require.NoError(t, err, "failed to accept challenge: %v", challenge) - } - - // Wait for the order/challenges to be validated. - _, err = acmeClient.WaitOrder(testCtx, order.URI) - require.NoError(t, err, "failed waiting for order to be ready") - - // Create/sign the CSR and ask ACME server to sign it returning us the final certificate - csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey) - require.NoError(t, err, "failed generating csr") - - t.Logf("[TEST-LOG] Created CSR: %v", hex.EncodeToString(csr)) - - certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false) - if err != nil { - if expectedFailure != "" { - require.Contains(t, err.Error(), expectedFailure, "got a unexpected failure not matching expected value") - return nil - } - - require.NoError(t, err, "failed to get a certificate back from ACME") - } else if expectedFailure != "" { - t.Fatalf("expected failure containing: %s got none", expectedFailure) - } - - acmeCert, err := x509.ParseCertificate(certs[0]) - require.NoError(t, err, "failed parsing acme cert bytes") - - return acmeCert -} - -func SubtestACMEWildcardDNS(t *testing.T, cluster *VaultPkiCluster) { - pki, err := cluster.CreateAcmeMount("pki-dns-wildcards") - require.NoError(t, err, "failed setting up acme mount") - - // Since we interact with ACME from outside the container network the ACME - // configuration needs to be updated to use the host port and not the internal - // docker ip. 
- basePath, err := pki.UpdateClusterConfigLocalAddr() - require.NoError(t, err, "failed updating cluster config") - - hostname := "go-lang-wildcard-client.dadgarcorp.com" - wildcard := "*." + hostname - - // Do validation without a role first. - directoryUrl := basePath + "/acme/directory" - acmeOrderIdentifiers := []acme.AuthzID{ - {Type: "dns", Value: hostname}, - {Type: "dns", Value: wildcard}, - } - cr := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: wildcard}, - DNSNames: []string{hostname, wildcard}, - } - - provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge { - // For each dns-01 challenge, place the record in the associated DNS resolver. - var challengesToAccept []*acme.Challenge - for _, auth := range auths { - for _, challenge := range auth.Challenges { - if challenge.Status != acme.StatusPending { - t.Logf("ignoring challenge not in status pending: %v", challenge) - continue - } - - if challenge.Type == "dns-01" { - challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) - require.NoError(t, err, "failed generating challenge response") - - err = pki.AddDNSRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) - require.NoError(t, err, "failed setting DNS record") - - challengesToAccept = append(challengesToAccept, challenge) - } - } - } - - require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") - return challengesToAccept - } - - acmeCert := doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") - require.Contains(t, acmeCert.DNSNames, hostname) - require.Contains(t, acmeCert.DNSNames, wildcard) - require.Equal(t, wildcard, acmeCert.Subject.CommonName) - pki.RemoveDNSRecordsForDomain(hostname) - - // Redo validation with a role this time. 
- err = pki.UpdateRole("wildcard", map[string]interface{}{ - "key_type": "any", - "allowed_domains": "go-lang-wildcard-client.dadgarcorp.com", - "allow_subdomains": true, - "allow_bare_domains": true, - "allow_wildcard_certificates": true, - "client_flag": false, - }) - require.NoError(t, err, "failed creating role wildcard") - directoryUrl = basePath + "/roles/wildcard/acme/directory" - - acmeCert = doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") - require.Contains(t, acmeCert.DNSNames, hostname) - require.Contains(t, acmeCert.DNSNames, wildcard) - require.Equal(t, wildcard, acmeCert.Subject.CommonName) - pki.RemoveDNSRecordsForDomain(hostname) -} - -func SubtestACMEPreventsICADNS(t *testing.T, cluster *VaultPkiCluster) { - pki, err := cluster.CreateAcmeMount("pki-dns-ica") - require.NoError(t, err, "failed setting up acme mount") - - // Since we interact with ACME from outside the container network the ACME - // configuration needs to be updated to use the host port and not the internal - // docker ip. - basePath, err := pki.UpdateClusterConfigLocalAddr() - require.NoError(t, err, "failed updating cluster config") - - hostname := "go-lang-intermediate-ca-cert.dadgarcorp.com" - - // Do validation without a role first. - directoryUrl := basePath + "/acme/directory" - acmeOrderIdentifiers := []acme.AuthzID{ - {Type: "dns", Value: hostname}, - } - cr := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: hostname}, - DNSNames: []string{hostname}, - ExtraExtensions: []pkix.Extension{ - // Basic Constraint with IsCA asserted to true. - { - Id: certutil.ExtensionBasicConstraintsOID, - Critical: true, - Value: []byte{0x30, 0x03, 0x01, 0x01, 0xFF}, - }, - }, - } - - provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge { - // For each dns-01 challenge, place the record in the associated DNS resolver. 
- var challengesToAccept []*acme.Challenge - for _, auth := range auths { - for _, challenge := range auth.Challenges { - if challenge.Status != acme.StatusPending { - t.Logf("ignoring challenge not in status pending: %v", challenge) - continue - } - - if challenge.Type == "dns-01" { - challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) - require.NoError(t, err, "failed generating challenge response") - - err = pki.AddDNSRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) - require.NoError(t, err, "failed setting DNS record") - - challengesToAccept = append(challengesToAccept, challenge) - } - } - } - - require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") - return challengesToAccept - } - - doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "refusing to accept CSR with Basic Constraints extension") - pki.RemoveDNSRecordsForDomain(hostname) - - // Redo validation with a role this time. - err = pki.UpdateRole("ica", map[string]interface{}{ - "key_type": "any", - "allowed_domains": "go-lang-intermediate-ca-cert.dadgarcorp.com", - "allow_subdomains": true, - "allow_bare_domains": true, - "allow_wildcard_certificates": true, - "client_flag": false, - }) - require.NoError(t, err, "failed creating role wildcard") - directoryUrl = basePath + "/roles/ica/acme/directory" - - doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "refusing to accept CSR with Basic Constraints extension") - pki.RemoveDNSRecordsForDomain(hostname) -} - -// SubtestACMEStepDownNode Verify that we can properly run an ACME session through a -// secondary node, and midway through the challenge verification process, seal the -// active node and make sure we can complete the ACME session on the new active node. 
-func SubtestACMEStepDownNode(t *testing.T, cluster *VaultPkiCluster) { - pki, err := cluster.CreateAcmeMount("stepdown-test") - require.NoError(t, err) - - // Since we interact with ACME from outside the container network the ACME - // configuration needs to be updated to use the host port and not the internal - // docker ip. We also grab the non-active node here on purpose to verify - // ACME related APIs are properly forwarded across standby hosts. - nonActiveNodes := pki.GetNonActiveNodes() - require.GreaterOrEqual(t, len(nonActiveNodes), 1, "Need at least one non-active node") - - nonActiveNode := nonActiveNodes[0] - - basePath := fmt.Sprintf("https://%s/v1/%s", nonActiveNode.HostPort, pki.mount) - err = pki.UpdateClusterConfig(map[string]interface{}{ - "path": basePath, - }) - - hostname := "go-lang-stepdown-client.dadgarcorp.com" - - acmeOrderIdentifiers := []acme.AuthzID{ - {Type: "dns", Value: hostname}, - } - cr := &x509.CertificateRequest{ - DNSNames: []string{hostname, hostname}, - } - - accountKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err, "failed creating rsa account key") - - acmeClient := &acme.Client{ - Key: accountKey, - HTTPClient: &http.Client{Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - }}, - DirectoryURL: basePath + "/acme/directory", - } - - testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute) - defer cancelFunc() - - // Create new account - _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}}, - func(tosURL string) bool { return true }) - require.NoError(t, err, "failed registering account") - - // Create an ACME order - order, err := acmeClient.AuthorizeOrder(testCtx, acmeOrderIdentifiers) - require.NoError(t, err, "failed creating ACME order") - - require.Len(t, order.AuthzURLs, 1, "expected a single authz url") - authUrl := order.AuthzURLs[0] - - authorization, err := 
acmeClient.GetAuthorization(testCtx, authUrl) - require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) - - dnsTxtRecordsToAdd := map[string]string{} - - var challengesToAccept []*acme.Challenge - for _, challenge := range authorization.Challenges { - if challenge.Status != acme.StatusPending { - t.Logf("ignoring challenge not in status pending: %v", challenge) - continue - } - - if challenge.Type == "dns-01" { - challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) - require.NoError(t, err, "failed generating challenge response") - - // Collect the challenges for us to add the DNS records after step-down - dnsTxtRecordsToAdd["_acme-challenge."+authorization.Identifier.Value] = challengeBody - challengesToAccept = append(challengesToAccept, challenge) - } - } - - // Tell the ACME server, that they can now validate those challenges, this will cause challenge - // verification failures on the main node as the DNS records do not exist. - for _, challenge := range challengesToAccept { - _, err = acmeClient.Accept(testCtx, challenge) - require.NoError(t, err, "failed to accept challenge: %v", challenge) - } - - // Now wait till we start seeing the challenge engine start failing the lookups. 
- testhelpers.RetryUntil(t, 10*time.Second, func() error { - myAuth, err := acmeClient.GetAuthorization(testCtx, authUrl) - require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) - - for _, challenge := range myAuth.Challenges { - if challenge.Error != nil { - // The engine failed on one of the challenges, we are done waiting - return nil - } - } - - return fmt.Errorf("no challenges for auth %v contained any errors", myAuth.Identifier) - }) - - // Seal the active node now and wait for the next node to appear - previousActiveNode := pki.GetActiveClusterNode() - t.Logf("Stepping down node id: %s", previousActiveNode.NodeID) - - haStatus, _ := previousActiveNode.APIClient().Sys().HAStatus() - t.Logf("Node: %v HaStatus: %v\n", previousActiveNode.NodeID, haStatus) - - testhelpers.RetryUntil(t, 2*time.Minute, func() error { - state, err := previousActiveNode.APIClient().Sys().RaftAutopilotState() - if err != nil { - return err - } - - t.Logf("Node: %v Raft AutoPilotState: %v\n", previousActiveNode.NodeID, state) - - if !state.Healthy { - return fmt.Errorf("raft auto pilot state is not healthy") - } - - // Make sure that we have at least one node that can take over prior to sealing the current active node. 
- if state.FailureTolerance < 1 { - msg := fmt.Sprintf("there is no fault tolerance within raft state yet: %d", state.FailureTolerance) - t.Log(msg) - return errors.New(msg) - } - - return nil - }) - - t.Logf("Sealing active node") - err = previousActiveNode.APIClient().Sys().Seal() - require.NoError(t, err, "failed stepping down node") - - // Add our DNS records now - t.Logf("Adding DNS records") - for dnsHost, dnsValue := range dnsTxtRecordsToAdd { - err = pki.AddDNSRecord(dnsHost, "TXT", dnsValue) - require.NoError(t, err, "failed adding DNS record: %s:%s", dnsHost, dnsValue) - } - - // Wait for our new active node to come up - testhelpers.RetryUntil(t, 2*time.Minute, func() error { - newNode := pki.GetActiveClusterNode() - if newNode.NodeID == previousActiveNode.NodeID { - return fmt.Errorf("existing node is still the leader after stepdown: %s", newNode.NodeID) - } - - t.Logf("New active node has node id: %v", newNode.NodeID) - return nil - }) - - // Wait for the order/challenges to be validated. 
- _, err = acmeClient.WaitOrder(testCtx, order.URI) - if err != nil { - // We failed waiting for the order to become ready, lets print out current challenge statuses to help debugging - myAuth, authErr := acmeClient.GetAuthorization(testCtx, authUrl) - require.NoError(t, authErr, "failed to lookup authorization at url: %s and wait order failed with: %v", authUrl, err) - - t.Logf("Authorization Status: %s", myAuth.Status) - for _, challenge := range myAuth.Challenges { - // The engine failed on one of the challenges, we are done waiting - t.Logf("challenge: %v state: %v Error: %v", challenge.Type, challenge.Status, challenge.Error) - } - - require.NoError(t, err, "failed waiting for order to be ready") - } - - // Create/sign the CSR and ask ACME server to sign it returning us the final certificate - csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey) - require.NoError(t, err, "failed generating csr") - - certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false) - require.NoError(t, err, "failed to get a certificate back from ACME") - - _, err = x509.ParseCertificate(certs[0]) - require.NoError(t, err, "failed parsing acme cert bytes") -} - -func getDockerLog(t *testing.T) (func(s string), *pkiext.LogConsumerWriter, *pkiext.LogConsumerWriter) { - logConsumer := func(s string) { - t.Logf(s) - } - - logStdout := &pkiext.LogConsumerWriter{logConsumer} - logStderr := &pkiext.LogConsumerWriter{logConsumer} - return logConsumer, logStdout, logStderr -} diff --git a/builtin/logical/pkiext/pkiext_binary/pki_cluster.go b/builtin/logical/pkiext/pkiext_binary/pki_cluster.go deleted file mode 100644 index da33300934baa..0000000000000 --- a/builtin/logical/pkiext/pkiext_binary/pki_cluster.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pkiext_binary - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/logical/pki/dnstest" - dockhelper "github.com/hashicorp/vault/sdk/helper/docker" - "github.com/hashicorp/vault/sdk/helper/testcluster" - "github.com/hashicorp/vault/sdk/helper/testcluster/docker" -) - -type VaultPkiCluster struct { - cluster *docker.DockerCluster - Dns *dnstest.TestServer -} - -func NewVaultPkiCluster(t *testing.T) *VaultPkiCluster { - binary := os.Getenv("VAULT_BINARY") - if binary == "" { - t.Skip("only running docker test when $VAULT_BINARY present") - } - - opts := &docker.DockerClusterOptions{ - ImageRepo: "docker.mirror.hashicorp.services/hashicorp/vault", - // We're replacing the binary anyway, so we're not too particular about - // the docker image version tag. - ImageTag: "latest", - VaultBinary: binary, - ClusterOptions: testcluster.ClusterOptions{ - VaultNodeConfig: &testcluster.VaultNodeConfig{ - LogLevel: "TRACE", - }, - NumCores: 3, - }, - } - - cluster := docker.NewTestDockerCluster(t, opts) - - return &VaultPkiCluster{cluster: cluster} -} - -func NewVaultPkiClusterWithDNS(t *testing.T) *VaultPkiCluster { - cluster := NewVaultPkiCluster(t) - dns := dnstest.SetupResolverOnNetwork(t, "dadgarcorp.com", cluster.GetContainerNetworkName()) - cluster.Dns = dns - return cluster -} - -func (vpc *VaultPkiCluster) Cleanup() { - vpc.cluster.Cleanup() - if vpc.Dns != nil { - vpc.Dns.Cleanup() - } -} - -func (vpc *VaultPkiCluster) GetActiveClusterNode() *docker.DockerClusterNode { - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - node, err := testcluster.WaitForActiveNode(ctx, vpc.cluster) - if err != nil { - panic(fmt.Sprintf("no cluster node became active in timeout window: %v", err)) - } - - return vpc.cluster.ClusterNodes[node] -} - -func (vpc *VaultPkiCluster) GetNonActiveNodes() 
[]*docker.DockerClusterNode { - nodes := []*docker.DockerClusterNode{} - for _, node := range vpc.cluster.ClusterNodes { - leader, err := node.APIClient().Sys().Leader() - if err != nil { - continue - } - - if !leader.IsSelf { - nodes = append(nodes, node) - } - } - - return nodes -} - -func (vpc *VaultPkiCluster) GetActiveContainerHostPort() string { - return vpc.GetActiveClusterNode().HostPort -} - -func (vpc *VaultPkiCluster) GetContainerNetworkName() string { - return vpc.cluster.ClusterNodes[0].ContainerNetworkName -} - -func (vpc *VaultPkiCluster) GetActiveContainerIP() string { - return vpc.GetActiveClusterNode().ContainerIPAddress -} - -func (vpc *VaultPkiCluster) GetActiveContainerID() string { - return vpc.GetActiveClusterNode().Container.ID -} - -func (vpc *VaultPkiCluster) GetActiveNode() *api.Client { - return vpc.GetActiveClusterNode().APIClient() -} - -func (vpc *VaultPkiCluster) AddHostname(hostname, ip string) error { - if vpc.Dns != nil { - vpc.Dns.AddRecord(hostname, "A", ip) - vpc.Dns.PushConfig() - return nil - } else { - return vpc.AddNameToHostFiles(hostname, ip) - } -} - -func (vpc *VaultPkiCluster) AddNameToHostFiles(hostname, ip string) error { - updateHostsCmd := []string{ - "sh", "-c", - "echo '" + ip + " " + hostname + "' >> /etc/hosts", - } - for _, node := range vpc.cluster.ClusterNodes { - containerID := node.Container.ID - _, _, retcode, err := dockhelper.RunCmdWithOutput(vpc.cluster.DockerAPI, context.Background(), containerID, updateHostsCmd) - if err != nil { - return fmt.Errorf("failed updating container %s host file: %w", containerID, err) - } - - if retcode != 0 { - return fmt.Errorf("expected zero retcode from updating vault host file in container %s got: %d", containerID, retcode) - } - } - - return nil -} - -func (vpc *VaultPkiCluster) AddDNSRecord(hostname, recordType, ip string) error { - if vpc.Dns == nil { - return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to provision custom records") - } 
- - vpc.Dns.AddRecord(hostname, recordType, ip) - vpc.Dns.PushConfig() - return nil -} - -func (vpc *VaultPkiCluster) RemoveDNSRecord(domain string, record string, value string) error { - if vpc.Dns == nil { - return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove specific record") - } - - vpc.Dns.RemoveRecord(domain, record, value) - return nil -} - -func (vpc *VaultPkiCluster) RemoveDNSRecordsOfTypeForDomain(domain string, record string) error { - if vpc.Dns == nil { - return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove all records of type") - } - - vpc.Dns.RemoveRecordsOfTypeForDomain(domain, record) - return nil -} - -func (vpc *VaultPkiCluster) RemoveDNSRecordsForDomain(domain string) error { - if vpc.Dns == nil { - return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove records for domain") - } - - vpc.Dns.RemoveRecordsForDomain(domain) - return nil -} - -func (vpc *VaultPkiCluster) RemoveAllDNSRecords() error { - if vpc.Dns == nil { - return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove all records") - } - - vpc.Dns.RemoveAllRecords() - return nil -} - -func (vpc *VaultPkiCluster) CreateMount(name string) (*VaultPkiMount, error) { - err := vpc.GetActiveNode().Sys().Mount(name, &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "32h", - AllowedResponseHeaders: []string{ - "Last-Modified", "Replay-Nonce", - "Link", "Location", - }, - }, - }) - if err != nil { - return nil, err - } - - return &VaultPkiMount{ - vpc, - name, - }, nil -} - -func (vpc *VaultPkiCluster) CreateAcmeMount(mountName string) (*VaultPkiMount, error) { - pki, err := vpc.CreateMount(mountName) - if err != nil { - return nil, fmt.Errorf("failed creating mount %s: %w", mountName, err) - } - - err = pki.UpdateClusterConfig(nil) - if err != nil { - return nil, fmt.Errorf("failed updating cluster config: 
%w", err) - } - - cfg := map[string]interface{}{ - "eab_policy": "not-required", - } - if vpc.Dns != nil { - cfg["dns_resolver"] = vpc.Dns.GetRemoteAddr() - } - - err = pki.UpdateAcmeConfig(true, cfg) - if err != nil { - return nil, fmt.Errorf("failed updating acme config: %w", err) - } - - // Setup root+intermediate CA hierarchy within this mount. - resp, err := pki.GenerateRootInternal(map[string]interface{}{ - "common_name": "Root X1", - "country": "US", - "organization": "Dadgarcorp", - "ou": "QA", - "key_type": "ec", - "key_bits": 256, - "use_pss": false, - "issuer_name": "root", - }) - if err != nil { - return nil, fmt.Errorf("failed generating root internal: %w", err) - } - if resp == nil || len(resp.Data) == 0 { - return nil, fmt.Errorf("failed generating root internal: nil or empty response but no error") - } - - resp, err = pki.GenerateIntermediateInternal(map[string]interface{}{ - "common_name": "Intermediate I1", - "country": "US", - "organization": "Dadgarcorp", - "ou": "QA", - "key_type": "ec", - "key_bits": 256, - "use_pss": false, - }) - if err != nil { - return nil, fmt.Errorf("failed generating int csr: %w", err) - } - if resp == nil || len(resp.Data) == 0 { - return nil, fmt.Errorf("failed generating int csr: nil or empty response but no error") - } - - resp, err = pki.SignIntermediary("default", resp.Data["csr"], map[string]interface{}{ - "common_name": "Intermediate I1", - "country": "US", - "organization": "Dadgarcorp", - "ou": "QA", - "key_type": "ec", - "csr": resp.Data["csr"], - }) - if err != nil { - return nil, fmt.Errorf("failed signing int csr: %w", err) - } - if resp == nil || len(resp.Data) == 0 { - return nil, fmt.Errorf("failed signing int csr: nil or empty response but no error") - } - intCert := resp.Data["certificate"].(string) - - resp, err = pki.ImportBundle(intCert, nil) - if err != nil { - return nil, fmt.Errorf("failed importing signed cert: %w", err) - } - if resp == nil || len(resp.Data) == 0 { - return nil, 
fmt.Errorf("failed importing signed cert: nil or empty response but no error") - } - - err = pki.UpdateDefaultIssuer(resp.Data["imported_issuers"].([]interface{})[0].(string), nil) - if err != nil { - return nil, fmt.Errorf("failed to set intermediate as default: %w", err) - } - - err = pki.UpdateIssuer("default", map[string]interface{}{ - "leaf_not_after_behavior": "truncate", - }) - if err != nil { - return nil, fmt.Errorf("failed to update intermediate ttl behavior: %w", err) - } - - err = pki.UpdateIssuer("root", map[string]interface{}{ - "leaf_not_after_behavior": "truncate", - }) - if err != nil { - return nil, fmt.Errorf("failed to update root ttl behavior: %w", err) - } - - return pki, nil -} diff --git a/builtin/logical/pkiext/pkiext_binary/pki_mount.go b/builtin/logical/pkiext/pkiext_binary/pki_mount.go deleted file mode 100644 index f949cb6ae56a6..0000000000000 --- a/builtin/logical/pkiext/pkiext_binary/pki_mount.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pkiext_binary - -import ( - "context" - "encoding/base64" - "fmt" - "path" - - "github.com/hashicorp/vault/api" -) - -type VaultPkiMount struct { - *VaultPkiCluster - mount string -} - -func (vpm *VaultPkiMount) UpdateClusterConfig(config map[string]interface{}) error { - defaultPath := "https://" + vpm.cluster.ClusterNodes[0].ContainerIPAddress + ":8200/v1/" + vpm.mount - defaults := map[string]interface{}{ - "path": defaultPath, - "aia_path": defaultPath, - } - - _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), - vpm.mount+"/config/cluster", mergeWithDefaults(config, defaults)) - return err -} - -func (vpm *VaultPkiMount) UpdateClusterConfigLocalAddr() (string, error) { - basePath := fmt.Sprintf("https://%s/v1/%s", vpm.GetActiveContainerHostPort(), vpm.mount) - return basePath, vpm.UpdateClusterConfig(map[string]interface{}{ - "path": basePath, - }) -} - -func (vpm *VaultPkiMount) UpdateAcmeConfig(enable bool, config map[string]interface{}) error { - defaults := map[string]interface{}{ - "enabled": enable, - } - - _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), - vpm.mount+"/config/acme", mergeWithDefaults(config, defaults)) - return err -} - -func (vpm *VaultPkiMount) GenerateRootInternal(props map[string]interface{}) (*api.Secret, error) { - defaults := map[string]interface{}{ - "common_name": "root-test.com", - "key_type": "ec", - "issuer_name": "root", - } - - return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), - vpm.mount+"/root/generate/internal", mergeWithDefaults(props, defaults)) -} - -func (vpm *VaultPkiMount) GenerateIntermediateInternal(props map[string]interface{}) (*api.Secret, error) { - defaults := map[string]interface{}{ - "common_name": "intermediary-test.com", - "key_type": "ec", - "issuer_name": "intermediary", - } - - return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), - 
vpm.mount+"/intermediate/generate/internal", mergeWithDefaults(props, defaults)) -} - -func (vpm *VaultPkiMount) SignIntermediary(signingIssuer string, csr interface{}, props map[string]interface{}) (*api.Secret, error) { - defaults := map[string]interface{}{ - "csr": csr, - } - - return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), - vpm.mount+"/issuer/"+signingIssuer+"/sign-intermediate", - mergeWithDefaults(props, defaults)) -} - -func (vpm *VaultPkiMount) ImportBundle(pemBundle interface{}, props map[string]interface{}) (*api.Secret, error) { - defaults := map[string]interface{}{ - "pem_bundle": pemBundle, - } - - return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), - vpm.mount+"/issuers/import/bundle", mergeWithDefaults(props, defaults)) -} - -func (vpm *VaultPkiMount) UpdateDefaultIssuer(issuerId string, props map[string]interface{}) error { - defaults := map[string]interface{}{ - "default": issuerId, - } - - _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), - vpm.mount+"/config/issuers", mergeWithDefaults(props, defaults)) - - return err -} - -func (vpm *VaultPkiMount) UpdateIssuer(issuerRef string, props map[string]interface{}) error { - defaults := map[string]interface{}{} - - _, err := vpm.GetActiveNode().Logical().JSONMergePatch(context.Background(), - vpm.mount+"/issuer/"+issuerRef, mergeWithDefaults(props, defaults)) - - return err -} - -func (vpm *VaultPkiMount) UpdateRole(roleName string, config map[string]interface{}) error { - defaults := map[string]interface{}{} - - _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), - vpm.mount+"/roles/"+roleName, mergeWithDefaults(config, defaults)) - - return err -} - -func (vpm *VaultPkiMount) GetEabKey(acmeDirectory string) (string, string, error) { - eabPath := path.Join(vpm.mount, acmeDirectory, "/new-eab") - resp, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), eabPath, 
map[string]interface{}{}) - if err != nil { - return "", "", fmt.Errorf("failed fetching eab from %s: %w", eabPath, err) - } - eabId := resp.Data["id"].(string) - base64EabKey := resp.Data["key"].(string) - // just make sure we get something valid back from the server, we still want to pass back the base64 version - // to the caller... - _, err = base64.RawURLEncoding.DecodeString(base64EabKey) - if err != nil { - return "", "", fmt.Errorf("failed decoding key response field: %s: %w", base64EabKey, err) - } - return eabId, base64EabKey, nil -} - -func mergeWithDefaults(config map[string]interface{}, defaults map[string]interface{}) map[string]interface{} { - myConfig := config - if myConfig == nil { - myConfig = map[string]interface{}{} - } - for key, value := range defaults { - if origVal, exists := config[key]; !exists { - myConfig[key] = value - } else { - myConfig[key] = origVal - } - } - - return myConfig -} diff --git a/builtin/logical/pkiext/test_helpers.go b/builtin/logical/pkiext/test_helpers.go deleted file mode 100644 index 38bbdfe112b55..0000000000000 --- a/builtin/logical/pkiext/test_helpers.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pkiext - -import ( - "bufio" - "bytes" - "crypto" - "crypto/x509" - "encoding/pem" - "fmt" - "testing" - - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" - - "github.com/stretchr/testify/require" -) - -func requireFieldsSetInResp(t *testing.T, resp *logical.Response, fields ...string) { - var missingFields []string - for _, field := range fields { - value, ok := resp.Data[field] - if !ok || value == nil { - missingFields = append(missingFields, field) - } - } - - require.Empty(t, missingFields, "The following fields were required but missing from response:\n%v", resp.Data) -} - -func requireSuccessNonNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { - require.NoError(t, err, msgAndArgs...) - if resp.IsError() { - errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) - require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) - } - require.NotNil(t, resp, msgAndArgs...) -} - -func requireSuccessNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { - require.NoError(t, err, msgAndArgs...) - if resp.IsError() { - errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) - require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) - } - if resp != nil { - msg := fmt.Sprintf("expected nil response but got: %v", resp) - require.Nilf(t, resp, msg, msgAndArgs...) 
- } -} - -func parseCert(t *testing.T, pemCert string) *x509.Certificate { - block, _ := pem.Decode([]byte(pemCert)) - require.NotNil(t, block, "failed to decode PEM block") - - cert, err := x509.ParseCertificate(block.Bytes) - require.NoError(t, err) - return cert -} - -func parseKey(t *testing.T, pemKey string) crypto.Signer { - block, _ := pem.Decode([]byte(pemKey)) - require.NotNil(t, block, "failed to decode PEM block") - - key, _, err := certutil.ParseDERKey(block.Bytes) - require.NoError(t, err) - return key -} - -type LogConsumerWriter struct { - Consumer func(string) -} - -func (l LogConsumerWriter) Write(p []byte) (n int, err error) { - // TODO this assumes that we're never passed partial log lines, which - // seems a safe assumption for now based on how docker looks to implement - // logging, but might change in the future. - scanner := bufio.NewScanner(bytes.NewReader(p)) - scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) - for scanner.Scan() { - l.Consumer(scanner.Text()) - } - return len(p), nil -} diff --git a/builtin/logical/pkiext/zlint_test.go b/builtin/logical/pkiext/zlint_test.go deleted file mode 100644 index 2f20152605823..0000000000000 --- a/builtin/logical/pkiext/zlint_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package pkiext - -import ( - "context" - "encoding/json" - "sync" - "testing" - - "github.com/hashicorp/vault/builtin/logical/pki" - "github.com/hashicorp/vault/sdk/helper/docker" - "github.com/stretchr/testify/require" -) - -var ( - zRunner *docker.Runner - buildZLintOnce sync.Once -) - -func buildZLintContainer(t *testing.T) { - containerfile := ` -FROM docker.mirror.hashicorp.services/library/golang:latest - -RUN go install github.com/zmap/zlint/v3/cmd/zlint@latest -` - - bCtx := docker.NewBuildContext() - - imageName := "vault_pki_zlint_validator" - imageTag := "latest" - - var err error - zRunner, err = docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: imageName, - ImageTag: imageTag, - ContainerName: "pki_zlint", - // We want to run sleep in the background so we're not stuck waiting - // for the default golang container's shell to prompt for input. - Entrypoint: []string{"sleep", "45"}, - LogConsumer: func(s string) { - if t.Failed() { - t.Logf("container logs: %s", s) - } - }, - }) - if err != nil { - t.Fatalf("Could not provision docker service runner: %s", err) - } - - ctx := context.Background() - output, err := zRunner.BuildImage(ctx, containerfile, bCtx, - docker.BuildRemove(true), docker.BuildForceRemove(true), - docker.BuildPullParent(true), - docker.BuildTags([]string{imageName + ":" + imageTag})) - if err != nil { - t.Fatalf("Could not build new image: %v", err) - } - - t.Logf("Image build output: %v", string(output)) -} - -func RunZLintContainer(t *testing.T, certificate string) []byte { - buildZLintOnce.Do(func() { - buildZLintContainer(t) - }) - - ctx := context.Background() - // We don't actually care about the address, we just want to start the - // container so we can run commands in it. We'd ideally like to skip this - // step and only build a new image, but the zlint output would be - // intermingled with container build stages, so its not that useful. 
- result, err := zRunner.Start(ctx, true, false) - if err != nil { - t.Fatalf("Could not start golang container for zlint: %s", err) - } - - // Copy the cert into the newly running container. - certCtx := docker.NewBuildContext() - certCtx["cert.pem"] = docker.PathContentsFromBytes([]byte(certificate)) - if err := zRunner.CopyTo(result.Container.ID, "/go/", certCtx); err != nil { - t.Fatalf("Could not copy certificate into container: %v", err) - } - - // Run the zlint command and save the output. - cmd := []string{"/go/bin/zlint", "/go/cert.pem"} - stdout, stderr, retcode, err := zRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) - if err != nil { - t.Fatalf("Could not run command in container: %v", err) - } - - if len(stderr) != 0 { - t.Logf("Got stderr from command:\n%v\n", string(stderr)) - } - - if retcode != 0 { - t.Logf("Got stdout from command:\n%v\n", string(stdout)) - t.Fatalf("Got unexpected non-zero retcode from zlint: %v\n", retcode) - } - - // Clean up after ourselves. 
- if err := zRunner.Stop(context.Background(), result.Container.ID); err != nil { - t.Fatalf("failed to stop container: %v", err) - } - - return stdout -} - -func RunZLintRootTest(t *testing.T, keyType string, keyBits int, usePSS bool, ignored []string) { - b, s := pki.CreateBackendWithStorage(t) - - resp, err := pki.CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "Root X1", - "country": "US", - "organization": "Dadgarcorp", - "ou": "QA", - "key_type": keyType, - "key_bits": keyBits, - "use_pss": usePSS, - }) - require.NoError(t, err) - rootCert := resp.Data["certificate"].(string) - - var parsed map[string]interface{} - output := RunZLintContainer(t, rootCert) - - if err := json.Unmarshal(output, &parsed); err != nil { - t.Fatalf("failed to parse zlint output as JSON: %v\nOutput:\n%v\n\n", err, string(output)) - } - - for key, rawValue := range parsed { - value := rawValue.(map[string]interface{}) - result, ok := value["result"] - if !ok || result == "NA" { - continue - } - - if result == "error" { - skip := false - for _, allowedFailures := range ignored { - if allowedFailures == key { - skip = true - break - } - } - - if !skip { - t.Fatalf("got unexpected error from test %v: %v", key, value) - } - } - } -} - -func Test_ZLintRSA2048(t *testing.T) { - t.Parallel() - RunZLintRootTest(t, "rsa", 2048, false, nil) -} - -func Test_ZLintRSA2048PSS(t *testing.T) { - t.Parallel() - RunZLintRootTest(t, "rsa", 2048, true, nil) -} - -func Test_ZLintRSA3072(t *testing.T) { - t.Parallel() - RunZLintRootTest(t, "rsa", 3072, false, nil) -} - -func Test_ZLintRSA3072PSS(t *testing.T) { - t.Parallel() - RunZLintRootTest(t, "rsa", 3072, true, nil) -} - -func Test_ZLintECDSA256(t *testing.T) { - t.Parallel() - RunZLintRootTest(t, "ec", 256, false, nil) -} - -func Test_ZLintECDSA384(t *testing.T) { - t.Parallel() - RunZLintRootTest(t, "ec", 384, false, nil) -} - -func Test_ZLintECDSA521(t *testing.T) { - t.Parallel() - // Mozilla doesn't allow P-521 
ECDSA keys. - RunZLintRootTest(t, "ec", 521, false, []string{ - "e_mp_ecdsa_pub_key_encoding_correct", - "e_mp_ecdsa_signature_encoding_correct", - }) -} diff --git a/builtin/logical/postgresql/backend.go b/builtin/logical/postgresql/backend.go new file mode 100644 index 0000000000000..8763cbda4ef36 --- /dev/null +++ b/builtin/logical/postgresql/backend.go @@ -0,0 +1,171 @@ +package postgresql + +import ( + "context" + "database/sql" + "fmt" + "strings" + "sync" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend(conf) + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend(conf *logical.BackendConfig) *backend { + var b backend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + + Paths: []*framework.Path{ + pathConfigConnection(&b), + pathConfigLease(&b), + pathListRoles(&b), + pathRoles(&b), + pathRoleCreate(&b), + }, + + Secrets: []*framework.Secret{ + secretCreds(&b), + }, + + Clean: b.ResetDB, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, + } + + b.logger = conf.Logger + return &b +} + +type backend struct { + *framework.Backend + + db *sql.DB + lock sync.Mutex + + logger log.Logger +} + +// DB returns the database connection. +func (b *backend) DB(ctx context.Context, s logical.Storage) (*sql.DB, error) { + b.logger.Debug("postgres/db: enter") + defer b.logger.Debug("postgres/db: exit") + + b.lock.Lock() + defer b.lock.Unlock() + + // If we already have a DB, we got it! 
+ if b.db != nil { + if err := b.db.Ping(); err == nil { + return b.db, nil + } + // If the ping was unsuccessful, close it and ignore errors as we'll be + // reestablishing anyways + b.db.Close() + } + + // Otherwise, attempt to make connection + entry, err := s.Get(ctx, "config/connection") + if err != nil { + return nil, err + } + if entry == nil { + return nil, + fmt.Errorf("configure the DB connection with config/connection first") + } + + var connConfig connectionConfig + if err := entry.DecodeJSON(&connConfig); err != nil { + return nil, err + } + + conn := connConfig.ConnectionURL + if len(conn) == 0 { + conn = connConfig.ConnectionString + } + + // Ensure timezone is set to UTC for all the connections + if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") { + if strings.Contains(conn, "?") { + conn += "&timezone=utc" + } else { + conn += "?timezone=utc" + } + } else { + conn += "&timezone=utc" + } + + b.db, err = sql.Open("pgx", conn) + if err != nil { + return nil, err + } + + // Set some connection pool settings. We don't need much of this, + // since the request rate shouldn't be high. + b.db.SetMaxOpenConns(connConfig.MaxOpenConnections) + b.db.SetMaxIdleConns(connConfig.MaxIdleConnections) + + return b.db, nil +} + +// ResetDB forces a connection next time DB() is called. 
+func (b *backend) ResetDB(_ context.Context) { + b.logger.Debug("postgres/db: enter") + defer b.logger.Debug("postgres/db: exit") + + b.lock.Lock() + defer b.lock.Unlock() + + if b.db != nil { + b.db.Close() + } + + b.db = nil +} + +func (b *backend) invalidate(ctx context.Context, key string) { + switch key { + case "config/connection": + b.ResetDB(ctx) + } +} + +// Lease returns the lease information +func (b *backend) Lease(ctx context.Context, s logical.Storage) (*configLease, error) { + entry, err := s.Get(ctx, "config/lease") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +const backendHelp = ` +The PostgreSQL backend dynamically generates database users. + +After mounting this backend, configure it using the endpoints within +the "config/" path. +` diff --git a/builtin/logical/postgresql/backend_test.go b/builtin/logical/postgresql/backend_test.go new file mode 100644 index 0000000000000..0c1dd46111d09 --- /dev/null +++ b/builtin/logical/postgresql/backend_test.go @@ -0,0 +1,532 @@ +package postgresql + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "log" + "path" + "reflect" + "testing" + + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +func TestBackend_config_connection(t *testing.T) { + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "connection_url": "sample_connection_url", + "max_open_connections": 9, + "max_idle_connections": 7, + "verify_connection": false, + } + + configReq := 
&logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + configReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(configData, "verify_connection") + delete(configData, "connection_url") + if !reflect.DeepEqual(configData, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data) + } +} + +func TestBackend_basic(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + connData := map[string]interface{}{ + "connection_url": connURL, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData, false), + testAccStepCreateRole(t, "web", testRole, false), + testAccStepReadCreds(t, b, config.StorageView, "web", connURL), + }, + }) +} + +func TestBackend_roleCrud(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + connData := map[string]interface{}{ + "connection_url": connURL, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData, false), + testAccStepCreateRole(t, "web", testRole, false), + testAccStepReadRole(t, "web", testRole), + 
testAccStepDeleteRole(t, "web"), + testAccStepReadRole(t, "web", ""), + }, + }) +} + +func TestBackend_BlockStatements(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + connData := map[string]interface{}{ + "connection_url": connURL, + } + jsonBlockStatement, err := json.Marshal(testBlockStatementRoleSlice) + if err != nil { + t.Fatal(err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData, false), + // This will also validate the query + testAccStepCreateRole(t, "web-block", testBlockStatementRole, true), + testAccStepCreateRole(t, "web-block", string(jsonBlockStatement), false), + }, + }) +} + +func TestBackend_roleReadOnly(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + connData := map[string]interface{}{ + "connection_url": connURL, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData, false), + testAccStepCreateRole(t, "web", testRole, false), + testAccStepCreateRole(t, "web-readonly", testReadOnlyRole, false), + testAccStepReadRole(t, "web-readonly", testReadOnlyRole), + testAccStepCreateTable(t, b, config.StorageView, "web", connURL), + testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL), + testAccStepDropTable(t, b, config.StorageView, "web", connURL), + testAccStepDeleteRole(t, "web-readonly"), + testAccStepDeleteRole(t, "web"), + testAccStepReadRole(t, "web-readonly", ""), + }, + }) +} + +func 
TestBackend_roleReadOnly_revocationSQL(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + connData := map[string]interface{}{ + "connection_url": connURL, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData, false), + testAccStepCreateRoleWithRevocationSQL(t, "web", testRole, defaultRevocationSQL, false), + testAccStepCreateRoleWithRevocationSQL(t, "web-readonly", testReadOnlyRole, defaultRevocationSQL, false), + testAccStepReadRole(t, "web-readonly", testReadOnlyRole), + testAccStepCreateTable(t, b, config.StorageView, "web", connURL), + testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL), + testAccStepDropTable(t, b, config.StorageView, "web", connURL), + testAccStepDeleteRole(t, "web-readonly"), + testAccStepDeleteRole(t, "web"), + testAccStepReadRole(t, "web-readonly", ""), + }, + }) +} + +func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Data: d, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if expectError { + if resp.Data == nil { + return fmt.Errorf("data is nil") + } + var e struct { + Error string `mapstructure:"error"` + } + if err := mapstructure.Decode(resp.Data, &e); err != nil { + return err + } + if len(e.Error) == 0 { + return fmt.Errorf("expected error, but write succeeded") + } + return nil + } else if resp != nil && resp.IsError() { + return fmt.Errorf("got an error response: %v", resp.Error()) + } + return nil + }, + } +} + +func testAccStepCreateRole(t *testing.T, name string, sql string, expectFail bool) logicaltest.TestStep { + return 
logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: path.Join("roles", name), + Data: map[string]interface{}{ + "sql": sql, + }, + ErrorOk: expectFail, + } +} + +func testAccStepCreateRoleWithRevocationSQL(t *testing.T, name, sql, revocationSQL string, expectFail bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: path.Join("roles", name), + Data: map[string]interface{}{ + "sql": sql, + "revocation_sql": revocationSQL, + }, + ErrorOk: expectFail, + } +} + +func testAccStepDeleteRole(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: path.Join("roles", name), + } +} + +func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: path.Join("creds", name), + Check: func(resp *logical.Response) error { + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + log.Printf("[TRACE] Generated credentials: %v", d) + + db, err := sql.Open("pgx", connURL+"&timezone=utc") + if err != nil { + t.Fatal(err) + } + + returnedRows := func() int { + stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');") + if err != nil { + return -1 + } + defer stmt.Close() + + rows, err := stmt.Query(d.Username) + if err != nil { + return -1 + } + defer rows.Close() + + i := 0 + for rows.Next() { + i++ + } + return i + } + + // minNumPermissions is the minimum number of permissions that will always be present. 
+ const minNumPermissions = 2 + + userRows := returnedRows() + if userRows < minNumPermissions { + t.Fatalf("did not get expected number of rows, got %d", userRows) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.RevokeOperation, + Storage: s, + Secret: &logical.Secret{ + InternalData: map[string]interface{}{ + "secret_type": "creds", + "username": d.Username, + "role": name, + }, + }, + }) + if err != nil { + return err + } + if resp != nil { + if resp.IsError() { + return fmt.Errorf("error on resp: %#v", *resp) + } + } + + userRows = returnedRows() + // User shouldn't exist so returnedRows() should encounter an error and exit with -1 + if userRows != -1 { + t.Fatalf("did not get expected number of rows, got %d", userRows) + } + + return nil + }, + } +} + +func testAccStepCreateTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: path.Join("creds", name), + Check: func(resp *logical.Response) error { + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + log.Printf("[TRACE] Generated credentials: %v", d) + + db, err := sql.Open("pgx", connURL+"&timezone=utc") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("CREATE TABLE test (id SERIAL PRIMARY KEY);") + if err != nil { + t.Fatal(err) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.RevokeOperation, + Storage: s, + Secret: &logical.Secret{ + InternalData: map[string]interface{}{ + "secret_type": "creds", + "username": d.Username, + }, + }, + }) + if err != nil { + return err + } + if resp != nil { + if resp.IsError() { + return fmt.Errorf("error on resp: %#v", *resp) + } + } + + return nil + }, + } +} + +func testAccStepDropTable(t *testing.T, b 
logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: path.Join("creds", name), + Check: func(resp *logical.Response) error { + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + log.Printf("[TRACE] Generated credentials: %v", d) + + db, err := sql.Open("pgx", connURL+"&timezone=utc") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("DROP TABLE test;") + if err != nil { + t.Fatal(err) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.RevokeOperation, + Storage: s, + Secret: &logical.Secret{ + InternalData: map[string]interface{}{ + "secret_type": "creds", + "username": d.Username, + }, + }, + }) + if err != nil { + return err + } + if resp != nil { + if resp.IsError() { + return fmt.Errorf("error on resp: %#v", *resp) + } + } + + return nil + }, + } +} + +func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if sql == "" { + return nil + } + + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + SQL string `mapstructure:"sql"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if d.SQL != sql { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +const testRole = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +const testReadOnlyRole = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; +GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}"; +GRANT 
SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}"; +` + +const testBlockStatementRole = ` +DO $$ +BEGIN + IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN + CREATE ROLE "foo-role"; + CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role"; + ALTER ROLE "foo-role" SET search_path = foo; + GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role"; + GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role"; + END IF; +END +$$ + +CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; +GRANT "foo-role" TO "{{name}}"; +ALTER ROLE "{{name}}" SET search_path = foo; +GRANT CONNECT ON DATABASE "postgres" TO "{{name}}"; +` + +var testBlockStatementRoleSlice = []string{ + ` +DO $$ +BEGIN + IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN + CREATE ROLE "foo-role"; + CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role"; + ALTER ROLE "foo-role" SET search_path = foo; + GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role"; + GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role"; + END IF; +END +$$ +`, + `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`, + `GRANT "foo-role" TO "{{name}}";`, + `ALTER ROLE "{{name}}" SET search_path = foo;`, + `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`, +} + +const defaultRevocationSQL = ` +REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}}; +REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}}; +REVOKE USAGE ON SCHEMA public FROM {{name}}; + +DROP ROLE IF EXISTS {{name}}; +` diff 
--git a/builtin/logical/postgresql/cmd/postgresql/main.go b/builtin/logical/postgresql/cmd/postgresql/main.go new file mode 100644 index 0000000000000..6610b975769d3 --- /dev/null +++ b/builtin/logical/postgresql/cmd/postgresql/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/postgresql" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.Serve(&plugin.ServeOpts{ + BackendFactoryFunc: postgresql.Factory, + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/postgresql/path_config_connection.go b/builtin/logical/postgresql/path_config_connection.go new file mode 100644 index 0000000000000..6f7b0f719e340 --- /dev/null +++ b/builtin/logical/postgresql/path_config_connection.go @@ -0,0 +1,168 @@ +package postgresql + +import ( + "context" + "database/sql" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + _ "github.com/jackc/pgx/v4/stdlib" +) + +func pathConfigConnection(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/connection", + Fields: map[string]*framework.FieldSchema{ + "connection_url": { + Type: framework.TypeString, + Description: "DB connection string", + }, + + "value": { + Type: framework.TypeString, + Description: `DB connection string. Use 'connection_url' instead. 
+This will be deprecated.`, + }, + + "verify_connection": { + Type: framework.TypeBool, + Default: true, + Description: `If set, connection_url is verified by actually connecting to the database`, + }, + + "max_open_connections": { + Type: framework.TypeInt, + Description: `Maximum number of open connections to the database; +a zero uses the default value of two and a +negative value means unlimited`, + }, + + "max_idle_connections": { + Type: framework.TypeInt, + Description: `Maximum number of idle connections to the database; +a zero uses the value of max_open_connections +and a negative value disables idle connections. +If larger than max_open_connections it will be +reduced to the same size.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConnectionWrite, + logical.ReadOperation: b.pathConnectionRead, + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +// pathConnectionRead reads out the connection configuration +func (b *backend) pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entry, err := req.Storage.Get(ctx, "config/connection") + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration") + } + if entry == nil { + return nil, nil + } + + var config connectionConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "max_open_connections": config.MaxOpenConnections, + "max_idle_connections": config.MaxIdleConnections, + }, + }, nil +} + +func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + connValue := data.Get("value").(string) + connURL := data.Get("connection_url").(string) + if connURL == "" { + if connValue == "" { + return logical.ErrorResponse("connection_url parameter 
must be supplied"), nil + } else { + connURL = connValue + } + } + + maxOpenConns := data.Get("max_open_connections").(int) + if maxOpenConns == 0 { + maxOpenConns = 2 + } + + maxIdleConns := data.Get("max_idle_connections").(int) + if maxIdleConns == 0 { + maxIdleConns = maxOpenConns + } + if maxIdleConns > maxOpenConns { + maxIdleConns = maxOpenConns + } + + // Don't check the connection_url if verification is disabled + verifyConnection := data.Get("verify_connection").(bool) + if verifyConnection { + // Verify the string + db, err := sql.Open("pgx", connURL) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error validating connection info: %s", err)), nil + } + defer db.Close() + if err := db.Ping(); err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error validating connection info: %s", err)), nil + } + } + + // Store it + entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{ + ConnectionString: connValue, + ConnectionURL: connURL, + MaxOpenConnections: maxOpenConns, + MaxIdleConnections: maxIdleConns, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + // Reset the DB connection + b.ResetDB(ctx) + + resp := &logical.Response{} + resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection string or URL as it is, including passwords, if any.") + + return resp, nil +} + +type connectionConfig struct { + ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"` + // Deprecate "value" in coming releases + ConnectionString string `json:"value" structs:"value" mapstructure:"value"` + MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"` + MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"` +} + +const 
pathConfigConnectionHelpSyn = ` +Configure the connection string to talk to PostgreSQL. +` + +const pathConfigConnectionHelpDesc = ` +This path configures the connection string used to connect to PostgreSQL. +The value of the string can be a URL, or a PG style string in the +format of "user=foo host=bar" etc. + +The URL looks like: +"postgresql://user:pass@host:port/dbname" + +When configuring the connection string, the backend will verify its validity. +` diff --git a/builtin/logical/postgresql/path_config_lease.go b/builtin/logical/postgresql/path_config_lease.go new file mode 100644 index 0000000000000..1a86059260226 --- /dev/null +++ b/builtin/logical/postgresql/path_config_lease.go @@ -0,0 +1,101 @@ +package postgresql + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigLease(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/lease", + Fields: map[string]*framework.FieldSchema{ + "lease": { + Type: framework.TypeString, + Description: "Default lease for roles.", + }, + + "lease_max": { + Type: framework.TypeString, + Description: "Maximum time a credential is valid for.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathLeaseRead, + logical.UpdateOperation: b.pathLeaseWrite, + }, + + HelpSynopsis: pathConfigLeaseHelpSyn, + HelpDescription: pathConfigLeaseHelpDesc, + } +} + +func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + leaseRaw := d.Get("lease").(string) + leaseMaxRaw := d.Get("lease_max").(string) + + lease, err := time.ParseDuration(leaseRaw) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Invalid lease: %s", err)), nil + } + leaseMax, err := time.ParseDuration(leaseMaxRaw) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Invalid lease: %s", err)), nil + } + + // 
Store it + entry, err := logical.StorageEntryJSON("config/lease", &configLease{ + Lease: lease, + LeaseMax: leaseMax, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "lease": lease.Lease.String(), + "lease_max": lease.LeaseMax.String(), + }, + }, nil +} + +type configLease struct { + Lease time.Duration + LeaseMax time.Duration +} + +const pathConfigLeaseHelpSyn = ` +Configure the default lease information for generated credentials. +` + +const pathConfigLeaseHelpDesc = ` +This configures the default lease information used for credentials +generated by this backend. The lease specifies the duration that a +credential will be valid for, as well as the maximum session for +a set of credentials. + +The format for the lease is "1h" or integer and then unit. The longest +unit is hour. 
+` diff --git a/builtin/logical/postgresql/path_role_create.go b/builtin/logical/postgresql/path_role_create.go new file mode 100644 index 0000000000000..18162db1733a4 --- /dev/null +++ b/builtin/logical/postgresql/path_role_create.go @@ -0,0 +1,149 @@ +package postgresql + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/logical" + _ "github.com/jackc/pgx/v4/stdlib" +) + +func pathRoleCreate(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleCreateRead, + }, + + HelpSynopsis: pathRoleCreateReadHelpSyn, + HelpDescription: pathRoleCreateReadHelpDesc, + } +} + +func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Get the role + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil + } + + // Determine if we have a lease + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + // Unlike some other backends we need a lease here (can't leave as 0 and + // let core fill it in) because Postgres also expires users as a safety + // measure, so cannot be zero + if lease == nil { + lease = &configLease{ + Lease: b.System().DefaultLeaseTTL(), + } + } + + // Generate the username, password and expiration. 
PG limits user to 63 characters + displayName := req.DisplayName + if len(displayName) > 26 { + displayName = displayName[:26] + } + userUUID, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + username := fmt.Sprintf("%s-%s", displayName, userUUID) + if len(username) > 63 { + username = username[:63] + } + password, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + ttl, _, err := framework.CalculateTTL(b.System(), 0, lease.Lease, 0, lease.LeaseMax, 0, time.Time{}) + if err != nil { + return nil, err + } + expiration := time.Now(). + Add(ttl). + Format("2006-01-02 15:04:05-0700") + + // Get our handle + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return nil, err + } + defer func() { + tx.Rollback() + }() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(role.SQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "password": password, + "expiration": expiration, + } + + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return nil, err + } + } + + // Commit the transaction + + if err := tx.Commit(); err != nil { + return nil, err + } + + // Return the secret + + resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ + "username": username, + "password": password, + }, map[string]interface{}{ + "username": username, + "role": name, + }) + resp.Secret.TTL = lease.Lease + resp.Secret.MaxTTL = lease.LeaseMax + return resp, nil +} + +const pathRoleCreateReadHelpSyn = ` +Request database credentials for a certain role. +` + +const pathRoleCreateReadHelpDesc = ` +This path reads database credentials for a certain role. The +database credentials will be generated on demand and will be automatically +revoked when the lease is up. 
+` diff --git a/builtin/logical/postgresql/path_roles.go b/builtin/logical/postgresql/path_roles.go new file mode 100644 index 0000000000000..b1af8328f928d --- /dev/null +++ b/builtin/logical/postgresql/path_roles.go @@ -0,0 +1,197 @@ +package postgresql + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + + "sql": { + Type: framework.TypeString, + Description: "SQL string to create a user. See help for more info.", + }, + + "revocation_sql": { + Type: framework.TypeString, + Description: `SQL statements to be executed to revoke a user. Must be a semicolon-separated +string, a base64-encoded semicolon-separated string, a serialized JSON string +array, or a base64-encoded serialized JSON string array. 
The '{{name}}' value +will be substituted.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleRead, + logical.UpdateOperation: b.pathRoleCreate, + logical.DeleteOperation: b.pathRoleDelete, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func (b *backend) Role(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { + entry, err := s.Get(ctx, "role/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "sql": role.SQL, + "revocation_sql": role.RevocationSQL, + }, + }, nil +} + +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + sql := data.Get("sql").(string) + + // Get our connection + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Test the query by trying to prepare it + for _, query := 
range strutil.ParseArbitraryStringSlice(sql, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := db.Prepare(Query(query, map[string]string{ + "name": "foo", + "password": "bar", + "expiration": "", + })) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error testing query: %s", err)), nil + } + stmt.Close() + } + + // Store it + entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{ + SQL: sql, + RevocationSQL: data.Get("revocation_sql").(string), + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +type roleEntry struct { + SQL string `json:"sql" mapstructure:"sql" structs:"sql"` + RevocationSQL string `json:"revocation_sql" mapstructure:"revocation_sql" structs:"revocation_sql"` +} + +const pathRoleHelpSyn = ` +Manage the roles that can be created with this backend. +` + +const pathRoleHelpDesc = ` +This path lets you manage the roles that can be created with this backend. + +The "sql" parameter customizes the SQL string used to create the role. +This can be a sequence of SQL queries. Some substitution will be done to the +SQL string for certain keys. The names of the variables must be surrounded +by "{{" and "}}" to be replaced. + + * "name" - The random username generated for the DB user. + + * "password" - The random password generated for the DB user. + + * "expiration" - The timestamp when this user will expire. + +Example of a decent SQL query to use: + + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; + +Note the above user would be able to access everything in schema public. +For more complex GRANT clauses, see the PostgreSQL manual. + +The "revocation_sql" parameter customizes the SQL string used to revoke a user. 
+Example of a decent revocation SQL query to use: + + REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}}; + REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}}; + REVOKE USAGE ON SCHEMA public FROM {{name}}; + DROP ROLE IF EXISTS {{name}}; +` diff --git a/builtin/logical/postgresql/query.go b/builtin/logical/postgresql/query.go new file mode 100644 index 0000000000000..e250a6fe3bda0 --- /dev/null +++ b/builtin/logical/postgresql/query.go @@ -0,0 +1,15 @@ +package postgresql + +import ( + "fmt" + "strings" +) + +// Query templates a query for us. +func Query(tpl string, data map[string]string) string { + for k, v := range data { + tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + } + + return tpl +} diff --git a/builtin/logical/postgresql/secret_creds.go b/builtin/logical/postgresql/secret_creds.go new file mode 100644 index 0000000000000..74e8f2fffd922 --- /dev/null +++ b/builtin/logical/postgresql/secret_creds.go @@ -0,0 +1,269 @@ +package postgresql + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/logical" +) + +const SecretCredsType = "creds" + +func secretCreds(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretCredsType, + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "Username", + }, + + "password": { + Type: framework.TypeString, + Description: "Password", + }, + }, + + Renew: b.secretCredsRenew, + Revoke: b.secretCredsRevoke, + } +} + +func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the username from the internal data + usernameRaw, ok := req.Secret.InternalData["username"] + if !ok { + 
return nil, fmt.Errorf("secret is missing username internal data") + } + username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("usernameRaw is not a string") + } + // Get our connection + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Get the lease information + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + lease = &configLease{} + } + + // Make sure we increase the VALID UNTIL endpoint for this user. + ttl, _, err := framework.CalculateTTL(b.System(), req.Secret.Increment, lease.Lease, 0, lease.LeaseMax, 0, req.Secret.IssueTime) + if err != nil { + return nil, err + } + if ttl > 0 { + expireTime := time.Now().Add(ttl) + // Adding a small buffer since the TTL will be calculated again afeter this call + // to ensure the database credential does not expire before the lease + expireTime = expireTime.Add(5 * time.Second) + expiration := expireTime.Format("2006-01-02 15:04:05-0700") + + query := fmt.Sprintf( + "ALTER ROLE %s VALID UNTIL '%s';", + dbutil.QuoteIdentifier(username), + expiration) + stmt, err := db.Prepare(query) + if err != nil { + return nil, err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return nil, err + } + } + + resp := &logical.Response{Secret: req.Secret} + resp.Secret.TTL = lease.Lease + resp.Secret.MaxTTL = lease.LeaseMax + return resp, nil +} + +func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the username from the internal data + usernameRaw, ok := req.Secret.InternalData["username"] + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("usernameRaw is not a string") + } + var revocationSQL string + var resp *logical.Response + + roleNameRaw, ok := req.Secret.InternalData["role"] + if ok { + role, err := b.Role(ctx, 
req.Storage, roleNameRaw.(string)) + if err != nil { + return nil, err + } + if role == nil { + if resp == nil { + resp = &logical.Response{} + } + resp.AddWarning(fmt.Sprintf("Role %q cannot be found. Using default revocation SQL.", roleNameRaw.(string))) + } else { + revocationSQL = role.RevocationSQL + } + } + + // Get our connection + db, err := b.DB(ctx, req.Storage) + if err != nil { + return nil, err + } + + switch revocationSQL { + + // This is the default revocation logic. If revocation SQL is provided it + // is simply executed as-is. + case "": + // Check if the role exists + var exists bool + err = db.QueryRow("SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists) + if err != nil && err != sql.ErrNoRows { + return nil, err + } + + if !exists { + return resp, nil + } + + // Query for permissions; we need to revoke permissions before we can drop + // the role + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + stmt, err := db.Prepare("SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;") + if err != nil { + return nil, err + } + defer stmt.Close() + + rows, err := stmt.Query(username) + if err != nil { + return nil, err + } + defer rows.Close() + + const initialNumRevocations = 16 + revocationStmts := make([]string, 0, initialNumRevocations) + for rows.Next() { + var schema string + err = rows.Scan(&schema) + if err != nil { + // keep going; remove as many permissions as possible right now + continue + } + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`, + dbutil.QuoteIdentifier(schema), + dbutil.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE USAGE ON SCHEMA %s FROM %s;`, + dbutil.QuoteIdentifier(schema), + dbutil.QuoteIdentifier(username))) + } + + // for good measure, revoke all privileges 
and usage on schema public + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`, + dbutil.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;", + dbutil.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + "REVOKE USAGE ON SCHEMA public FROM %s;", + dbutil.QuoteIdentifier(username))) + + // get the current database name so we can issue a REVOKE CONNECT for + // this username + var dbname sql.NullString + if err := db.QueryRow("SELECT current_database();").Scan(&dbname); err != nil { + return nil, err + } + + if dbname.Valid { + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE CONNECT ON DATABASE %s FROM %s;`, + dbutil.QuoteIdentifier(dbname.String), + dbutil.QuoteIdentifier(username))) + } + + // again, here, we do not stop on error, as we want to remove as + // many permissions as possible right now + var lastStmtError error + for _, query := range revocationStmts { + if err := dbtxn.ExecuteDBQueryDirect(ctx, db, nil, query); err != nil { + lastStmtError = err + } + } + + // can't drop if not all privileges are revoked + if rows.Err() != nil { + return nil, fmt.Errorf("could not generate revocation statements for all rows: %w", rows.Err()) + } + if lastStmtError != nil { + return nil, fmt.Errorf("could not perform all revocation statements: %w", lastStmtError) + } + + // Drop this user + stmt, err = db.Prepare(fmt.Sprintf( + `DROP ROLE IF EXISTS %s;`, dbutil.QuoteIdentifier(username))) + if err != nil { + return nil, err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return nil, err + } + + // We have revocation SQL, execute directly, within a transaction + default: + tx, err := db.Begin() + if err != nil { + return nil, err + } + defer func() { + tx.Rollback() + }() + + for _, query := range 
strutil.ParseArbitraryStringSlice(revocationSQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return nil, err + } + } + + if err := tx.Commit(); err != nil { + return nil, err + } + } + + return resp, nil +} diff --git a/builtin/logical/rabbitmq/backend.go b/builtin/logical/rabbitmq/backend.go index 20ad1afd18f1e..d1f223810a65a 100644 --- a/builtin/logical/rabbitmq/backend.go +++ b/builtin/logical/rabbitmq/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( @@ -14,8 +11,6 @@ import ( rabbithole "github.com/michaelklishin/rabbit-hole/v2" ) -const operationPrefixRabbitMQ = "rabbit-mq" - // Factory creates and configures the backend func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() diff --git a/builtin/logical/rabbitmq/backend_test.go b/builtin/logical/rabbitmq/backend_test.go index 61b18dac9d1ce..89659796b5e5c 100644 --- a/builtin/logical/rabbitmq/backend_test.go +++ b/builtin/logical/rabbitmq/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( @@ -11,8 +8,8 @@ import ( "testing" "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/hashicorp/vault/helper/testhelpers/docker" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" rabbithole "github.com/michaelklishin/rabbit-hole/v2" @@ -39,7 +36,7 @@ func prepareRabbitMQTestContainer(t *testing.T) (func(), string) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "docker.mirror.hashicorp.services/library/rabbitmq", + ImageRepo: "rabbitmq", ImageTag: "3-management", ContainerName: "rabbitmq", Ports: []string{"15672/tcp"}, diff --git a/builtin/logical/rabbitmq/cmd/rabbitmq/main.go b/builtin/logical/rabbitmq/cmd/rabbitmq/main.go index 2cb62daca51ab..516f699eaee6c 100644 --- a/builtin/logical/rabbitmq/cmd/rabbitmq/main.go +++ b/builtin/logical/rabbitmq/cmd/rabbitmq/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: rabbitmq.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/rabbitmq/passwords.go b/builtin/logical/rabbitmq/passwords.go index 8ba08a0afa4b5..01bfd41f0db2b 100644 --- a/builtin/logical/rabbitmq/passwords.go +++ b/builtin/logical/rabbitmq/passwords.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( diff --git a/builtin/logical/rabbitmq/path_config_connection.go b/builtin/logical/rabbitmq/path_config_connection.go index 897945512338a..51abe2547aa19 100644 --- a/builtin/logical/rabbitmq/path_config_connection.go +++ b/builtin/logical/rabbitmq/path_config_connection.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( @@ -20,13 +17,6 @@ const ( func pathConfigConnection(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/connection", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixRabbitMQ, - OperationVerb: "configure", - OperationSuffix: "connection", - }, - Fields: map[string]*framework.FieldSchema{ "connection_uri": { Type: framework.TypeString, diff --git a/builtin/logical/rabbitmq/path_config_connection_test.go b/builtin/logical/rabbitmq/path_config_connection_test.go index 55e6b2cd042cb..dddee8f0c9b88 100644 --- a/builtin/logical/rabbitmq/path_config_connection_test.go +++ b/builtin/logical/rabbitmq/path_config_connection_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( diff --git a/builtin/logical/rabbitmq/path_config_lease.go b/builtin/logical/rabbitmq/path_config_lease.go index 9436f3f4924fa..0b6bb572188d5 100644 --- a/builtin/logical/rabbitmq/path_config_lease.go +++ b/builtin/logical/rabbitmq/path_config_lease.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( @@ -15,11 +12,6 @@ import ( func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixRabbitMQ, - }, - Fields: map[string]*framework.FieldSchema{ "ttl": { Type: framework.TypeDurationSecond, @@ -33,21 +25,9 @@ func pathConfigLease(b *backend) *framework.Path { }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathLeaseRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "read", - OperationSuffix: "lease-configuration", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathLeaseUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "lease", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathLeaseRead, + logical.UpdateOperation: b.pathLeaseUpdate, }, HelpSynopsis: pathConfigLeaseHelpSyn, diff --git a/builtin/logical/rabbitmq/path_config_lease_test.go b/builtin/logical/rabbitmq/path_config_lease_test.go index 9e565c56f69b1..ec7e7e169c087 100644 --- a/builtin/logical/rabbitmq/path_config_lease_test.go +++ b/builtin/logical/rabbitmq/path_config_lease_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( diff --git a/builtin/logical/rabbitmq/path_role_create.go b/builtin/logical/rabbitmq/path_role_create.go index fd3f256ffbfc1..5ad1ff6bdf417 100644 --- a/builtin/logical/rabbitmq/path_role_create.go +++ b/builtin/logical/rabbitmq/path_role_create.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( @@ -21,13 +18,6 @@ const ( func pathCreds(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixRabbitMQ, - OperationVerb: "request", - OperationSuffix: "credentials", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/rabbitmq/path_role_create_test.go b/builtin/logical/rabbitmq/path_role_create_test.go index ecb9746872513..2c3d5f4b86d66 100644 --- a/builtin/logical/rabbitmq/path_role_create_test.go +++ b/builtin/logical/rabbitmq/path_role_create_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( diff --git a/builtin/logical/rabbitmq/path_roles.go b/builtin/logical/rabbitmq/path_roles.go index 98c2f3d1f487d..2031c7d99ec59 100644 --- a/builtin/logical/rabbitmq/path_roles.go +++ b/builtin/logical/rabbitmq/path_roles.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( @@ -16,10 +13,6 @@ import ( func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixRabbitMQ, - OperationSuffix: "roles", - }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -31,10 +24,6 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixRabbitMQ, - OperationSuffix: "role", - }, Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/rabbitmq/secret_creds.go b/builtin/logical/rabbitmq/secret_creds.go index 2d0cce30858b6..b31dfc7188aa5 100644 --- a/builtin/logical/rabbitmq/secret_creds.go +++ b/builtin/logical/rabbitmq/secret_creds.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package rabbitmq import ( diff --git a/builtin/logical/ssh/backend.go b/builtin/logical/ssh/backend.go index 0606b3eae56f4..fe4f40b334c97 100644 --- a/builtin/logical/ssh/backend.go +++ b/builtin/logical/ssh/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -13,8 +10,6 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const operationPrefixSSH = "ssh" - type backend struct { *framework.Backend view logical.Storage @@ -52,12 +47,13 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { SealWrapStorage: []string{ caPrivateKey, caPrivateKeyStoragePath, - keysStoragePrefix, + "keys/", }, }, Paths: []*framework.Path{ pathConfigZeroAddress(&b), + pathKeys(&b), pathListRoles(&b), pathRoles(&b), pathCredsCreate(&b), @@ -67,10 +63,10 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { pathSign(&b), pathIssue(&b), pathFetchPublicKey(&b), - pathCleanupKeys(&b), }, Secrets: []*framework.Secret{ + secretDynamicKey(&b), secretOTP(&b), }, @@ -116,8 +112,8 @@ const backendHelp = ` The SSH backend generates credentials allowing clients to establish SSH connections to remote hosts. -There are two variants of the backend, which generate different types of -credentials: One-Time Passwords (OTPs) and certificate authority. The desired behavior +There are three variants of the backend, which generate different types of +credentials: dynamic keys, One-Time Passwords (OTPs) and certificate authority. The desired behavior is role-specific and chosen at role creation time with the 'key_type' parameter. diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go index 13f9f73624af4..27934d42af17f 100644 --- a/builtin/logical/ssh/backend_test.go +++ b/builtin/logical/ssh/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -16,28 +13,29 @@ import ( "time" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/logical" + + "golang.org/x/crypto/ssh" + "github.com/hashicorp/vault/builtin/credential/userpass" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/helper/testhelpers/docker" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/docker" - "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" - "github.com/stretchr/testify/require" - "golang.org/x/crypto/ssh" ) const ( - testIP = "127.0.0.1" - testUserName = "vaultssh" - testMultiUserName = "vaultssh,otherssh" - testAdminUser = "vaultssh" - testCaKeyType = "ca" - testOTPKeyType = "otp" - testCIDRList = "127.0.0.1/32" - testAtRoleName = "test@RoleName" - testOTPRoleName = "testOTPRoleName" + testIP = "127.0.0.1" + testUserName = "vaultssh" + testAdminUser = "vaultssh" + testCaKeyType = "ca" + testOTPKeyType = "otp" + testDynamicKeyType = "dynamic" + testCIDRList = "127.0.0.1/32" + testAtRoleName = "test@RoleName" + testDynamicRoleName = "testDynamicRoleName" + testOTPRoleName = "testOTPRoleName" // testKeyName is the name of the entry that will be written to SSHMOUNTPOINT/ssh/keys testKeyName = "testKeyName" // testSharedPrivateKey is the value of the entry that will be written to SSHMOUNTPOINT/ssh/keys @@ -132,15 +130,13 @@ SjOQL/GkH1nkRcDS9++aAAAAAmNhAQID dockerImageTagSupportsNoRSA1 = "8.4_p1-r3-ls48" ) -var ctx = context.Background() - func prepareTestContainer(t *testing.T, tag, caPublicKeyPEM string) (func(), string) { if tag == "" { tag = dockerImageTagSupportsNoRSA1 } runner, err := docker.NewServiceRunner(docker.RunOptions{ ContainerName: "openssh", - ImageRepo: "docker.mirror.hashicorp.services/linuxserver/openssh-server", + ImageRepo: 
"linuxserver/openssh-server", ImageTag: tag, Env: []string{ "DOCKER_MODS=linuxserver/mods:openssh-server-openssh-client", @@ -360,15 +356,6 @@ func TestBackend_AllowedUsersTemplate(t *testing.T) { ) } -func TestBackend_MultipleAllowedUsersTemplate(t *testing.T) { - testAllowedUsersTemplate(t, - "{{ identity.entity.metadata.ssh_username }}", - testUserName, map[string]string{ - "ssh_username": testMultiUserName, - }, - ) -} - func TestBackend_AllowedUsersTemplate_WithStaticPrefix(t *testing.T) { testAllowedUsersTemplate(t, "ssh-{{ identity.entity.metadata.ssh_username }}", @@ -524,7 +511,7 @@ func newTestingFactory(t *testing.T) func(ctx context.Context, conf *logical.Bac defaultLeaseTTLVal := 2 * time.Minute maxLeaseTTLVal := 10 * time.Minute return Factory(context.Background(), &logical.BackendConfig{ - Logger: corehelpers.NewTestLogger(t), + Logger: vault.NewTestLogger(t), StorageView: &logical.InmemStorage{}, System: &logical.StaticSystemView{ DefaultLeaseTTLVal: defaultLeaseTTLVal, @@ -540,22 +527,36 @@ func TestSSHBackend_Lookup(t *testing.T) { "default_user": testUserName, "cidr_list": testCIDRList, } + testDynamicRoleData := map[string]interface{}{ + "key_type": testDynamicKeyType, + "key": testKeyName, + "admin_user": testAdminUser, + "default_user": testAdminUser, + "cidr_list": testCIDRList, + } data := map[string]interface{}{ "ip": testIP, } resp1 := []string(nil) resp2 := []string{testOTPRoleName} - resp3 := []string{testAtRoleName} + resp3 := []string{testDynamicRoleName, testOTPRoleName} + resp4 := []string{testDynamicRoleName} + resp5 := []string{testAtRoleName} logicaltest.Test(t, logicaltest.TestCase{ LogicalFactory: newTestingFactory(t), Steps: []logicaltest.TestStep{ testLookupRead(t, data, resp1), testRoleWrite(t, testOTPRoleName, testOTPRoleData), testLookupRead(t, data, resp2), + testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), + testRoleWrite(t, testDynamicRoleName, testDynamicRoleData), + testLookupRead(t, data, resp3), 
testRoleDelete(t, testOTPRoleName), + testLookupRead(t, data, resp4), + testRoleDelete(t, testDynamicRoleName), testLookupRead(t, data, resp1), - testRoleWrite(t, testAtRoleName, testOTPRoleData), - testLookupRead(t, data, resp3), + testRoleWrite(t, testAtRoleName, testDynamicRoleData), + testLookupRead(t, data, resp5), testRoleDelete(t, testAtRoleName), testLookupRead(t, data, resp1), }, @@ -604,6 +605,39 @@ func TestSSHBackend_RoleList(t *testing.T) { }) } +func TestSSHBackend_DynamicKeyCreate(t *testing.T) { + cleanup, sshAddress := prepareTestContainer(t, "", "") + defer cleanup() + + host, port, err := net.SplitHostPort(sshAddress) + if err != nil { + t.Fatal(err) + } + + testDynamicRoleData := map[string]interface{}{ + "key_type": testDynamicKeyType, + "key": testKeyName, + "admin_user": testAdminUser, + "default_user": testAdminUser, + "cidr_list": host + "/32", + "port": port, + } + data := map[string]interface{}{ + "username": testUserName, + "ip": host, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), + testRoleWrite(t, testDynamicRoleName, testDynamicRoleData), + testCredsWrite(t, testDynamicRoleName, data, false, sshAddress), + testRoleWrite(t, testAtRoleName, testDynamicRoleData), + testCredsWrite(t, testAtRoleName, data, false, sshAddress), + }, + }) +} + func TestSSHBackend_OTPRoleCrud(t *testing.T) { testOTPRoleData := map[string]interface{}{ "key_type": testOTPKeyType, @@ -631,6 +665,50 @@ func TestSSHBackend_OTPRoleCrud(t *testing.T) { }) } +func TestSSHBackend_DynamicRoleCrud(t *testing.T) { + testDynamicRoleData := map[string]interface{}{ + "key_type": testDynamicKeyType, + "key": testKeyName, + "admin_user": testAdminUser, + "default_user": testAdminUser, + "cidr_list": testCIDRList, + } + respDynamicRoleData := map[string]interface{}{ + "cidr_list": testCIDRList, + "port": 22, + "install_script": 
DefaultPublicKeyInstallScript, + "key_bits": 1024, + "key": testKeyName, + "admin_user": testUserName, + "default_user": testUserName, + "key_type": testDynamicKeyType, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), + testRoleWrite(t, testDynamicRoleName, testDynamicRoleData), + testRoleRead(t, testDynamicRoleName, respDynamicRoleData), + testRoleDelete(t, testDynamicRoleName), + testRoleRead(t, testDynamicRoleName, nil), + testRoleWrite(t, testAtRoleName, testDynamicRoleData), + testRoleRead(t, testAtRoleName, respDynamicRoleData), + testRoleDelete(t, testAtRoleName), + testRoleRead(t, testAtRoleName, nil), + }, + }) +} + +func TestSSHBackend_NamedKeysCrud(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), + testNamedKeysDelete(t), + }, + }) +} + func TestSSHBackend_OTPCreate(t *testing.T) { cleanup, sshAddress := prepareTestContainer(t, "", "") defer func() { @@ -684,14 +762,24 @@ func TestSSHBackend_ConfigZeroAddressCRUD(t *testing.T) { "default_user": testUserName, "cidr_list": testCIDRList, } + testDynamicRoleData := map[string]interface{}{ + "key_type": testDynamicKeyType, + "key": testKeyName, + "admin_user": testAdminUser, + "default_user": testAdminUser, + "cidr_list": testCIDRList, + } req1 := map[string]interface{}{ "roles": testOTPRoleName, } resp1 := map[string]interface{}{ "roles": []string{testOTPRoleName}, } + req2 := map[string]interface{}{ + "roles": fmt.Sprintf("%s,%s", testOTPRoleName, testDynamicRoleName), + } resp2 := map[string]interface{}{ - "roles": []string{testOTPRoleName}, + "roles": []string{testOTPRoleName, testDynamicRoleName}, } resp3 := map[string]interface{}{ "roles": []string{}, @@ -703,7 +791,11 @@ func TestSSHBackend_ConfigZeroAddressCRUD(t *testing.T) { 
testRoleWrite(t, testOTPRoleName, testOTPRoleData), testConfigZeroAddressWrite(t, req1), testConfigZeroAddressRead(t, resp1), + testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), + testRoleWrite(t, testDynamicRoleName, testDynamicRoleData), + testConfigZeroAddressWrite(t, req2), testConfigZeroAddressRead(t, resp2), + testRoleDelete(t, testDynamicRoleName), testConfigZeroAddressRead(t, resp1), testRoleDelete(t, testOTPRoleName), testConfigZeroAddressRead(t, resp3), @@ -737,6 +829,43 @@ func TestSSHBackend_CredsForZeroAddressRoles_otp(t *testing.T) { }) } +func TestSSHBackend_CredsForZeroAddressRoles_dynamic(t *testing.T) { + cleanup, sshAddress := prepareTestContainer(t, "", "") + defer cleanup() + + host, port, err := net.SplitHostPort(sshAddress) + if err != nil { + t.Fatal(err) + } + + dynamicRoleData := map[string]interface{}{ + "key_type": testDynamicKeyType, + "key": testKeyName, + "admin_user": testAdminUser, + "default_user": testAdminUser, + "port": port, + } + data := map[string]interface{}{ + "username": testUserName, + "ip": host, + } + req2 := map[string]interface{}{ + "roles": testDynamicRoleName, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), + testRoleWrite(t, testDynamicRoleName, dynamicRoleData), + testCredsWrite(t, testDynamicRoleName, data, true, sshAddress), + testConfigZeroAddressWrite(t, req2), + testCredsWrite(t, testDynamicRoleName, data, false, sshAddress), + testConfigZeroAddressDelete(t), + testCredsWrite(t, testDynamicRoleName, data, true, sshAddress), + }, + }) +} + func TestSSHBackend_CA(t *testing.T) { testCases := []struct { name string @@ -948,63 +1077,12 @@ cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA== return nil }, }, - testIssueCert("testcarole", "ec", testUserName, sshAddress, expectError), - testIssueCert("testcarole", "ed25519", testUserName, sshAddress, expectError), - 
testIssueCert("testcarole", "rsa", testUserName, sshAddress, expectError), }, } logicaltest.Test(t, testCase) } -func testIssueCert(role string, keyType string, testUserName string, sshAddress string, expectError bool) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "issue/" + role, - ErrorOk: expectError, - Data: map[string]interface{}{ - "key_type": keyType, - "valid_principals": testUserName, - }, - - Check: func(resp *logical.Response) error { - // Tolerate nil response if an error was expected - if expectError && resp == nil { - return nil - } - - signedKey := strings.TrimSpace(resp.Data["signed_key"].(string)) - if signedKey == "" { - return errors.New("no signed key in response") - } - - privKey, err := ssh.ParsePrivateKey([]byte(resp.Data["private_key"].(string))) - if err != nil { - return fmt.Errorf("error parsing private key: %v", err) - } - - parsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(signedKey)) - if err != nil { - return fmt.Errorf("error parsing signed key: %v", err) - } - certSigner, err := ssh.NewCertSigner(parsedKey.(*ssh.Certificate), privKey) - if err != nil { - return err - } - - err = testSSH(testUserName, sshAddress, ssh.PublicKeys(certSigner), "date") - if expectError && err == nil { - return fmt.Errorf("expected error but got none") - } - if !expectError && err != nil { - return err - } - - return nil - }, - } -} - func TestSSHBackend_CAUpgradeAlgorithmSigner(t *testing.T) { cleanup, sshAddress := prepareTestContainer(t, dockerImageTagSupportsRSA1, testCAPublicKey) defer cleanup() @@ -2326,6 +2404,23 @@ func testVerifyWrite(t *testing.T, data map[string]interface{}, expected map[str } } +func testNamedKeysWrite(t *testing.T, name, key string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s", name), + Data: map[string]interface{}{ + "key": key, + }, + } +} + +func testNamedKeysDelete(t *testing.T) 
logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: fmt.Sprintf("keys/%s", testKeyName), + } +} + func testLookupRead(t *testing.T, data map[string]interface{}, expected []string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, @@ -2390,6 +2485,10 @@ func testRoleRead(t *testing.T, roleName string, expected map[string]interface{} if d.KeyType != expected["key_type"] || d.DefaultUser != expected["default_user"] || d.CIDRList != expected["cidr_list"] { return fmt.Errorf("data mismatch. bad: %#v", resp) } + case "dynamic": + if d.AdminUser != expected["admin_user"] || d.CIDRList != expected["cidr_list"] || d.KeyName != expected["key"] || d.KeyType != expected["key_type"] { + return fmt.Errorf("data mismatch. bad: %#v", resp) + } default: return fmt.Errorf("unknown key type. bad: %#v", resp) } @@ -2430,7 +2529,7 @@ func testCredsWrite(t *testing.T, roleName string, data map[string]interface{}, } return nil } - if roleName == testAtRoleName { + if roleName == testDynamicRoleName || roleName == testAtRoleName { var d struct { Key string `mapstructure:"key"` } @@ -2460,382 +2559,3 @@ func testCredsWrite(t *testing.T, roleName string, data map[string]interface{}, }, } } - -func TestBackend_CleanupDynamicHostKeys(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - - b, err := Backend(config) - if err != nil { - t.Fatal(err) - } - err = b.Setup(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - // Running on a clean mount shouldn't do anything. 
- cleanRequest := &logical.Request{ - Operation: logical.DeleteOperation, - Path: "tidy/dynamic-keys", - Storage: config.StorageView, - } - - resp, err := b.HandleRequest(context.Background(), cleanRequest) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotNil(t, resp.Data["message"]) - require.Contains(t, resp.Data["message"], "0 of 0") - - // Write a bunch of bogus entries. - for i := 0; i < 15; i++ { - data := map[string]interface{}{ - "host": "localhost", - "key": "nothing-to-see-here", - } - entry, err := logical.StorageEntryJSON(fmt.Sprintf("%vexample-%v", keysStoragePrefix, i), &data) - require.NoError(t, err) - err = config.StorageView.Put(context.Background(), entry) - require.NoError(t, err) - } - - // Should now have 15 - resp, err = b.HandleRequest(context.Background(), cleanRequest) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotNil(t, resp.Data["message"]) - require.Contains(t, resp.Data["message"], "15 of 15") - - // Should have none left. 
- resp, err = b.HandleRequest(context.Background(), cleanRequest) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotNil(t, resp.Data["message"]) - require.Contains(t, resp.Data["message"], "0 of 0") -} - -type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) - -func isPermDenied(err error) bool { - return strings.Contains(err.Error(), "permission denied") -} - -func isUnsupportedPathOperation(err error) bool { - return strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation") -} - -func isDeniedOp(err error) bool { - return isPermDenied(err) || isUnsupportedPathOperation(err) -} - -func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) { - client.SetToken("") - resp, err := client.Logical().ReadWithContext(ctx, path) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to read %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().ListWithContext(ctx, path) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) - } -} - -func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { - // Should be able to read both with and without a token. 
- client.SetToken("") - resp, err := client.Logical().ReadWithContext(ctx, path) - if err != nil && isPermDenied(err) { - // Read will sometimes return permission denied, when the handler - // does not support the given operation. Retry with the token. - client.SetToken(token) - resp2, err2 := client.Logical().ReadWithContext(ctx, path) - if err2 != nil && !isUnsupportedPathOperation(err2) { - t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) - } - client.SetToken("") - } - resp, err = client.Logical().ListWithContext(ctx, path) - if err != nil && isPermDenied(err) { - // List will sometimes return permission denied, when the handler - // does not support the given operation. Retry with the token. - client.SetToken(token) - resp2, err2 := client.Logical().ListWithContext(ctx, path) - if err2 != nil && !isUnsupportedPathOperation(err2) { - t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) - } - client.SetToken("") - } - - // These should all be denied. - resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp) - } - - // Retrying with token should allow read/list, but not modification still. 
- client.SetToken(token) - resp, err = client.Logical().ReadWithContext(ctx, path) - if err != nil && isPermDenied(err) { - t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp) - } - resp, err = client.Logical().ListWithContext(ctx, path) - if err != nil && isPermDenied(err) { - t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp) - } - - // Should all be denied. - resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp) - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp) - } -} - -func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) { - client.SetToken("") - resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err != nil && isPermDenied(err) { - t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) - } - - // These should all be denied. 
- resp, err = client.Logical().ReadWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().ListWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp) - } - - // Retrying with token should allow writing, but nothing else. - client.SetToken(token) - resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err != nil && isPermDenied(err) { - t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) - } - - // These should all be denied. 
- resp, err = client.Logical().ReadWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp) - } - resp, err = client.Logical().ListWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - if resp != nil || err != nil { - t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp) - } - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp) - } -} - -type pathAuthChecker int - -const ( - shouldBeAuthed pathAuthChecker = iota - shouldBeUnauthedReadList - shouldBeUnauthedWriteOnly -) - -var pathAuthChckerMap = map[pathAuthChecker]pathAuthCheckerFunc{ - shouldBeAuthed: pathShouldBeAuthed, - shouldBeUnauthedReadList: pathShouldBeUnauthedReadList, - shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly, -} - -func TestProperAuthing(t *testing.T) { - t.Parallel() - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "ssh": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - token := client.Token() - - // Mount SSH. - err := client.Sys().MountWithContext(ctx, "ssh", &api.MountInput{ - Type: "ssh", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "60h", - }, - }) - if err != nil { - t.Fatal(err) - } - - // Setup basic configuration. 
- _, err = client.Logical().WriteWithContext(ctx, "ssh/config/ca", map[string]interface{}{ - "generate_signing_key": true, - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().WriteWithContext(ctx, "ssh/roles/test-ca", map[string]interface{}{ - "key_type": "ca", - "allow_user_certificates": true, - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().WriteWithContext(ctx, "ssh/issue/test-ca", map[string]interface{}{ - "username": "toor", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().WriteWithContext(ctx, "ssh/roles/test-otp", map[string]interface{}{ - "key_type": "otp", - "default_user": "toor", - "cidr_list": "127.0.0.0/24", - }) - if err != nil { - t.Fatal(err) - } - - resp, err := client.Logical().WriteWithContext(ctx, "ssh/creds/test-otp", map[string]interface{}{ - "username": "toor", - "ip": "127.0.0.1", - }) - if err != nil || resp == nil { - t.Fatal(err) - } - // key := resp.Data["key"].(string) - - paths := map[string]pathAuthChecker{ - "config/ca": shouldBeAuthed, - "config/zeroaddress": shouldBeAuthed, - "creds/test-otp": shouldBeAuthed, - "issue/test-ca": shouldBeAuthed, - "lookup": shouldBeAuthed, - "public_key": shouldBeUnauthedReadList, - "roles/test-ca": shouldBeAuthed, - "roles/test-otp": shouldBeAuthed, - "roles": shouldBeAuthed, - "sign/test-ca": shouldBeAuthed, - "tidy/dynamic-keys": shouldBeAuthed, - "verify": shouldBeUnauthedWriteOnly, - } - for path, checkerType := range paths { - checker := pathAuthChckerMap[checkerType] - checker(t, client, "ssh/"+path, token) - } - - client.SetToken(token) - openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi") - if err != nil { - t.Fatalf("failed to get openapi data: %v", err) - } - - if len(openAPIResp.Data["paths"].(map[string]interface{})) == 0 { - t.Fatalf("expected to get response from OpenAPI; got empty path list") - } - - validatedPath := false - for openapi_path, raw_data := range 
openAPIResp.Data["paths"].(map[string]interface{}) { - if !strings.HasPrefix(openapi_path, "/ssh/") { - t.Logf("Skipping path: %v", openapi_path) - continue - } - - t.Logf("Validating path: %v", openapi_path) - validatedPath = true - - // Substitute values in from our testing map. - raw_path := openapi_path[5:] - if strings.Contains(raw_path, "{role}") && strings.Contains(raw_path, "roles/") { - raw_path = strings.ReplaceAll(raw_path, "{role}", "test-ca") - } - if strings.Contains(raw_path, "{role}") && (strings.Contains(raw_path, "sign/") || strings.Contains(raw_path, "issue/")) { - raw_path = strings.ReplaceAll(raw_path, "{role}", "test-ca") - } - if strings.Contains(raw_path, "{role}") && strings.Contains(raw_path, "creds") { - raw_path = strings.ReplaceAll(raw_path, "{role}", "test-otp") - } - - handler, present := paths[raw_path] - if !present { - t.Fatalf("OpenAPI reports SSH mount contains %v->%v but was not tested to be authed or authed.", openapi_path, raw_path) - } - - openapi_data := raw_data.(map[string]interface{}) - hasList := false - rawGetData, hasGet := openapi_data["get"] - if hasGet { - getData := rawGetData.(map[string]interface{}) - getParams, paramsPresent := getData["parameters"].(map[string]interface{}) - if getParams != nil && paramsPresent { - if _, hasList = getParams["list"]; hasList { - // LIST is exclusive from GET on the same endpoint usually. 
- hasGet = false - } - } - } - _, hasPost := openapi_data["post"] - _, hasDelete := openapi_data["delete"] - - if handler == shouldBeUnauthedReadList { - if hasPost || hasDelete { - t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities") - } - } - } - - if !validatedPath { - t.Fatalf("Expected to have validated at least one path.") - } -} diff --git a/builtin/logical/ssh/cmd/ssh/main.go b/builtin/logical/ssh/cmd/ssh/main.go index a9cf8b2696968..d04bd30af67e2 100644 --- a/builtin/logical/ssh/cmd/ssh/main.go +++ b/builtin/logical/ssh/cmd/ssh/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: ssh.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/ssh/communicator.go b/builtin/logical/ssh/communicator.go new file mode 100644 index 0000000000000..8950c41e1cec4 --- /dev/null +++ b/builtin/logical/ssh/communicator.go @@ -0,0 +1,350 @@ +package ssh + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + + log "github.com/hashicorp/go-hclog" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +type comm struct { + client *ssh.Client + config *SSHCommConfig + conn net.Conn + address string +} + +// SSHCommConfig is the structure used to configure the SSH communicator. 
+type SSHCommConfig struct { + // The configuration of the Go SSH connection + SSHConfig *ssh.ClientConfig + + // Connection returns a new connection. The current connection + // in use will be closed as part of the Close method, or in the + // case an error occurs. + Connection func() (net.Conn, error) + + // Pty, if true, will request a pty from the remote end. + Pty bool + + // DisableAgent, if true, will not forward the SSH agent. + DisableAgent bool + + // Logger for output + Logger log.Logger +} + +// Creates a new communicator implementation over SSH. This takes +// an already existing TCP connection and SSH configuration. +func SSHCommNew(address string, config *SSHCommConfig) (result *comm, err error) { + // Establish an initial connection and connect + result = &comm{ + config: config, + address: address, + } + + if err = result.reconnect(); err != nil { + result = nil + return + } + + return +} + +func (c *comm) Close() error { + var err error + if c.conn != nil { + err = c.conn.Close() + } + c.conn = nil + c.client = nil + return err +} + +func (c *comm) Upload(path string, input io.Reader, fi *os.FileInfo) error { + // The target directory and file for talking the SCP protocol + target_dir := filepath.Dir(path) + target_file := filepath.Base(path) + + // On windows, filepath.Dir uses backslash separators (ie. "\tmp"). + // This does not work when the target host is unix. 
Switch to forward slash + // which works for unix and windows + target_dir = filepath.ToSlash(target_dir) + + scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { + return scpUploadFile(target_file, input, w, stdoutR, fi) + } + + return c.scpSession("scp -vt "+target_dir, scpFunc) +} + +func (c *comm) NewSession() (session *ssh.Session, err error) { + if c.client == nil { + err = errors.New("client not available") + } else { + session, err = c.client.NewSession() + } + + if err != nil { + c.config.Logger.Error("ssh session open error, attempting reconnect", "error", err) + if err := c.reconnect(); err != nil { + c.config.Logger.Error("reconnect attempt failed", "error", err) + return nil, err + } + + return c.client.NewSession() + } + + return session, nil +} + +func (c *comm) reconnect() error { + // Close previous connection. + if c.conn != nil { + c.Close() + } + + var err error + c.conn, err = c.config.Connection() + if err != nil { + // Explicitly set this to the REAL nil. Connection() can return + // a nil implementation of net.Conn which will make the + // "if c.conn == nil" check fail above. 
Read here for more information + // on this psychotic language feature: + // + // http://golang.org/doc/faq#nil_error + c.conn = nil + c.config.Logger.Error("reconnection error", "error", err) + return err + } + + sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, c.address, c.config.SSHConfig) + if err != nil { + c.config.Logger.Error("handshake error", "error", err) + c.Close() + return err + } + if sshConn != nil { + c.client = ssh.NewClient(sshConn, sshChan, req) + } + c.connectToAgent() + + return nil +} + +func (c *comm) connectToAgent() { + if c.client == nil { + return + } + + if c.config.DisableAgent { + return + } + + // open connection to the local agent + socketLocation := os.Getenv("SSH_AUTH_SOCK") + if socketLocation == "" { + return + } + agentConn, err := net.Dial("unix", socketLocation) + if err != nil { + c.config.Logger.Error("could not connect to local agent socket", "socket_path", socketLocation) + return + } + defer agentConn.Close() + + // create agent and add in auth + forwardingAgent := agent.NewClient(agentConn) + if forwardingAgent == nil { + c.config.Logger.Error("could not create agent client") + return + } + + // add callback for forwarding agent to SSH config + // XXX - might want to handle reconnects appending multiple callbacks + auth := ssh.PublicKeysCallback(forwardingAgent.Signers) + c.config.SSHConfig.Auth = append(c.config.SSHConfig.Auth, auth) + agent.ForwardToAgent(c.client, forwardingAgent) + + // Setup a session to request agent forwarding + session, err := c.NewSession() + if err != nil { + return + } + defer session.Close() + + err = agent.RequestAgentForwarding(session) + if err != nil { + c.config.Logger.Error("error requesting agent forwarding", "error", err) + return + } + return +} + +func (c *comm) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error { + session, err := c.NewSession() + if err != nil { + return err + } + defer session.Close() + + // Get a pipe to stdin so that we can send 
data down + stdinW, err := session.StdinPipe() + if err != nil { + return err + } + + // We only want to close once, so we nil w after we close it, + // and only close in the defer if it hasn't been closed already. + defer func() { + if stdinW != nil { + stdinW.Close() + } + }() + + // Get a pipe to stdout so that we can get responses back + stdoutPipe, err := session.StdoutPipe() + if err != nil { + return err + } + stdoutR := bufio.NewReader(stdoutPipe) + + // Set stderr to a bytes buffer + stderr := new(bytes.Buffer) + session.Stderr = stderr + + // Start the sink mode on the other side + if err := session.Start(scpCommand); err != nil { + return err + } + + // Call our callback that executes in the context of SCP. We ignore + // EOF errors if they occur because it usually means that SCP prematurely + // ended on the other side. + if err := f(stdinW, stdoutR); err != nil && err != io.EOF { + return err + } + + // Close the stdin, which sends an EOF, and then set w to nil so that + // our defer func doesn't close it again since that is unsafe with + // the Go SSH package. + stdinW.Close() + stdinW = nil + + // Wait for the SCP connection to close, meaning it has consumed all + // our data and has completed. Or has errored. + err = session.Wait() + if err != nil { + if exitErr, ok := err.(*ssh.ExitError); ok { + // Otherwise, we have an ExitErorr, meaning we can just read + // the exit status + c.config.Logger.Error("got non-zero exit status", "exit_status", exitErr.ExitStatus()) + + // If we exited with status 127, it means SCP isn't available. + // Return a more descriptive error for that. + if exitErr.ExitStatus() == 127 { + return errors.New( + "SCP failed to start. This usually means that SCP is not\n" + + "properly installed on the remote system.") + } + } + + return err + } + return nil +} + +// checkSCPStatus checks that a prior command sent to SCP completed +// successfully. If it did not complete successfully, an error will +// be returned. 
+func checkSCPStatus(r *bufio.Reader) error { + code, err := r.ReadByte() + if err != nil { + return err + } + + if code != 0 { + // Treat any non-zero (really 1 and 2) as fatal errors + message, _, err := r.ReadLine() + if err != nil { + return fmt.Errorf("error reading error message: %w", err) + } + + return errors.New(string(message)) + } + + return nil +} + +func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi *os.FileInfo) error { + var mode os.FileMode + var size int64 + + if fi != nil && (*fi).Mode().IsRegular() { + mode = (*fi).Mode().Perm() + size = (*fi).Size() + } else { + // Create a temporary file where we can copy the contents of the src + // so that we can determine the length, since SCP is length-prefixed. + tf, err := ioutil.TempFile("", "vault-ssh-upload") + if err != nil { + return fmt.Errorf("error creating temporary file for upload: %w", err) + } + defer os.Remove(tf.Name()) + defer tf.Close() + + mode = 0o644 + + if _, err := io.Copy(tf, src); err != nil { + return err + } + + // Sync the file so that the contents are definitely on disk, then + // read the length of it. 
+ if err := tf.Sync(); err != nil { + return fmt.Errorf("error creating temporary file for upload: %w", err) + } + + // Seek the file to the beginning so we can re-read all of it + if _, err := tf.Seek(0, 0); err != nil { + return fmt.Errorf("error creating temporary file for upload: %w", err) + } + + tfi, err := tf.Stat() + if err != nil { + return fmt.Errorf("error creating temporary file for upload: %w", err) + } + + size = tfi.Size() + src = tf + } + + // Start the protocol + perms := fmt.Sprintf("C%04o", mode) + + fmt.Fprintln(w, perms, size, dst) + if err := checkSCPStatus(r); err != nil { + return err + } + + if _, err := io.CopyN(w, src, size); err != nil { + return err + } + + fmt.Fprint(w, "\x00") + if err := checkSCPStatus(r); err != nil { + return err + } + + return nil +} diff --git a/builtin/logical/ssh/linux_install_script.go b/builtin/logical/ssh/linux_install_script.go new file mode 100644 index 0000000000000..a2228b2fc2e05 --- /dev/null +++ b/builtin/logical/ssh/linux_install_script.go @@ -0,0 +1,71 @@ +package ssh + +const ( + // This is a constant representing a script to install and uninstall public + // key in remote hosts. + DefaultPublicKeyInstallScript = ` +#!/bin/bash +# +# This is a default script which installs or uninstalls an RSA public key to/from +# authorized_keys file in a typical linux machine. +# +# If the platform differs or if the binaries used in this script are not available +# in target machine, use the 'install_script' parameter with 'roles/' endpoint to +# register a custom script (applicable for Dynamic type only). +# +# Vault server runs this script on the target machine with the following params: +# +# $1:INSTALL_OPTION: "install" or "uninstall" +# +# $2:PUBLIC_KEY_FILE: File name containing public key to be installed. Vault server +# uses UUID as name to avoid collisions with public keys generated for other requests. +# +# $3:AUTH_KEYS_FILE: Absolute path of the authorized_keys file. 
+# Currently, vault uses /home//.ssh/authorized_keys as the path. +# +# [Note: This script will be run by Vault using the registered admin username. +# Notice that some commands below are run as 'sudo'. For graceful execution of +# this script there should not be any password prompts. So, disable password +# prompt for the admin username registered with Vault. + +set -e + +# Storing arguments into variables, to increase readability of the script. +INSTALL_OPTION=$1 +PUBLIC_KEY_FILE=$2 +AUTH_KEYS_FILE=$3 + +# Delete the public key file and the temporary file +function cleanup +{ + rm -f "$PUBLIC_KEY_FILE" temp_$PUBLIC_KEY_FILE +} + +# 'cleanup' will be called if the script ends or if any command fails. +trap cleanup EXIT + +# Return if the option is anything other than 'install' or 'uninstall'. +if [ "$INSTALL_OPTION" != "install" ] && [ "$INSTALL_OPTION" != "uninstall" ]; then + exit 1 +fi + +# use locking to avoid parallel script execution +( + flock --timeout 10 200 + # Create the .ssh directory and authorized_keys file if it does not exist + SSH_DIR=$(dirname $AUTH_KEYS_FILE) + sudo mkdir -p "$SSH_DIR" + sudo touch "$AUTH_KEYS_FILE" + # Remove the key from authorized_keys file if it is already present. + # This step is common for both install and uninstall. 
Note that grep's + # return code is ignored, thus if grep fails all keys will be removed + # rather than none and it fails secure + sudo grep -vFf "$PUBLIC_KEY_FILE" "$AUTH_KEYS_FILE" > temp_$PUBLIC_KEY_FILE || true + cat temp_$PUBLIC_KEY_FILE | sudo tee "$AUTH_KEYS_FILE" + # Append the new public key to authorized_keys file + if [ "$INSTALL_OPTION" == "install" ]; then + cat "$PUBLIC_KEY_FILE" | sudo tee --append "$AUTH_KEYS_FILE" + fi +) 200> ${AUTH_KEYS_FILE}.lock +` +) diff --git a/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go b/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go deleted file mode 100644 index 5ae2afc8e88d3..0000000000000 --- a/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package ssh - -import ( - "context" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -const keysStoragePrefix = "keys/" - -func pathCleanupKeys(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "tidy/dynamic-keys", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - OperationVerb: "tidy", - OperationSuffix: "dynamic-host-keys", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.DeleteOperation: b.handleCleanupKeys, - }, - HelpSynopsis: `This endpoint removes the stored host keys used for the removed Dynamic Key feature, if present.`, - HelpDescription: `For more information, refer to the API documentation.`, - } -} - -func (b *backend) handleCleanupKeys(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - names, err := req.Storage.List(ctx, keysStoragePrefix) - if err != nil { - return nil, fmt.Errorf("unable to list keys for removal: %w", err) - } - - for index, name := range names { - keyPath := keysStoragePrefix + name - if err := req.Storage.Delete(ctx, keyPath); err != nil { - 
return nil, fmt.Errorf("unable to delete key %v of %v: %w", index+1, len(names), err) - } - } - - return &logical.Response{ - Data: map[string]interface{}{ - "message": fmt.Sprintf("Removed %v of %v host keys.", len(names), len(names)), - }, - }, nil -} diff --git a/builtin/logical/ssh/path_config_ca.go b/builtin/logical/ssh/path_config_ca.go index 3fa890c29358d..2fd76c12f9109 100644 --- a/builtin/logical/ssh/path_config_ca.go +++ b/builtin/logical/ssh/path_config_ca.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -41,11 +38,6 @@ type keyStorageEntry struct { func pathConfigCA(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/ca", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - }, - Fields: map[string]*framework.FieldSchema{ "private_key": { Type: framework.TypeString, @@ -72,26 +64,10 @@ func pathConfigCA(b *backend) *framework.Path { }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigCAUpdate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "ca", - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathConfigCADelete, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "ca-configuration", - }, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigCARead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "ca-configuration", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConfigCAUpdate, + logical.DeleteOperation: b.pathConfigCADelete, + logical.ReadOperation: b.pathConfigCARead, }, HelpSynopsis: `Set the SSH private key used for signing certificates.`, diff --git a/builtin/logical/ssh/path_config_ca_test.go b/builtin/logical/ssh/path_config_ca_test.go index 
4c33fc80892dc..651ed42ce0fb0 100644 --- a/builtin/logical/ssh/path_config_ca_test.go +++ b/builtin/logical/ssh/path_config_ca_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ssh import ( diff --git a/builtin/logical/ssh/path_config_zeroaddress.go b/builtin/logical/ssh/path_config_zeroaddress.go index 773e9b36fbe0a..d1e31e234df12 100644 --- a/builtin/logical/ssh/path_config_zeroaddress.go +++ b/builtin/logical/ssh/path_config_zeroaddress.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -21,11 +18,6 @@ type zeroAddressRoles struct { func pathConfigZeroAddress(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/zeroaddress", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - }, - Fields: map[string]*framework.FieldSchema{ "roles": { Type: framework.TypeCommaStringSlice, @@ -34,27 +26,10 @@ func pathConfigZeroAddress(b *backend) *framework.Path { previously registered under these roles will be ignored.`, }, }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigZeroAddressWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "zero-address", - }, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigZeroAddressRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "zero-address-configuration", - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathConfigZeroAddressDelete, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "zero-address-configuration", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConfigZeroAddressWrite, + logical.ReadOperation: b.pathConfigZeroAddressRead, + logical.DeleteOperation: 
b.pathConfigZeroAddressDelete, }, HelpSynopsis: pathConfigZeroAddressSyn, HelpDescription: pathConfigZeroAddressDesc, diff --git a/builtin/logical/ssh/path_creds_create.go b/builtin/logical/ssh/path_creds_create.go index b95c1f3752e17..6a644ab881f92 100644 --- a/builtin/logical/ssh/path_creds_create.go +++ b/builtin/logical/ssh/path_creds_create.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -23,13 +20,6 @@ type sshOTP struct { func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameWithAtRegex("role"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - OperationVerb: "generate", - OperationSuffix: "credentials", - }, - Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -44,11 +34,9 @@ func pathCredsCreate(b *backend) *framework.Path { Description: "[Required] IP of the remote host", }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathCredsCreateWrite, }, - HelpSynopsis: pathCredsCreateHelpSyn, HelpDescription: pathCredsCreateHelpDesc, } @@ -147,7 +135,30 @@ func (b *backend) pathCredsCreateWrite(ctx context.Context, req *logical.Request "otp": otp, }) } else if role.KeyType == KeyTypeDynamic { - return nil, fmt.Errorf("dynamic key types have been removed") + // Generate an RSA key pair. This also installs the newly generated + // public key in the remote host. + dynamicPublicKey, dynamicPrivateKey, err := b.GenerateDynamicCredential(ctx, req, role, username, ip) + if err != nil { + return nil, err + } + + // Return the information relevant to user of dynamic type and save + // information required for later use in internal section of secret. 
+ result = b.Secret(SecretDynamicKeyType).Response(map[string]interface{}{ + "key": dynamicPrivateKey, + "key_type": role.KeyType, + "username": username, + "ip": ip, + "port": role.Port, + }, map[string]interface{}{ + "admin_user": role.AdminUser, + "username": username, + "ip": ip, + "host_key_name": role.KeyName, + "dynamic_public_key": dynamicPublicKey, + "port": role.Port, + "install_script": role.InstallScript, + }) } else { return nil, fmt.Errorf("key type unknown") } @@ -155,6 +166,41 @@ func (b *backend) pathCredsCreateWrite(ctx context.Context, req *logical.Request return result, nil } +// Generates a RSA key pair and installs it in the remote target +func (b *backend) GenerateDynamicCredential(ctx context.Context, req *logical.Request, role *sshRole, username, ip string) (string, string, error) { + // Fetch the host key to be used for dynamic key installation + keyEntry, err := req.Storage.Get(ctx, fmt.Sprintf("keys/%s", role.KeyName)) + if err != nil { + return "", "", fmt.Errorf("key %q not found: %w", role.KeyName, err) + } + + if keyEntry == nil { + return "", "", fmt.Errorf("key %q not found", role.KeyName) + } + + var hostKey sshHostKey + if err := keyEntry.DecodeJSON(&hostKey); err != nil { + return "", "", fmt.Errorf("error reading the host key: %w", err) + } + + // Generate a new RSA key pair with the given key length. 
+ dynamicPublicKey, dynamicPrivateKey, err := generateRSAKeys(role.KeyBits) + if err != nil { + return "", "", fmt.Errorf("error generating key: %w", err) + } + + if len(role.KeyOptionSpecs) != 0 { + dynamicPublicKey = fmt.Sprintf("%s %s", role.KeyOptionSpecs, dynamicPublicKey) + } + + // Add the public key to authorized_keys file in target machine + err = b.installPublicKeyInTarget(ctx, role.AdminUser, username, ip, role.Port, hostKey.Key, dynamicPublicKey, role.InstallScript, true) + if err != nil { + return "", "", fmt.Errorf("failed to add public key to authorized_keys file in target: %w", err) + } + return dynamicPublicKey, dynamicPrivateKey, nil +} + // Generates a UUID OTP and its salted value based on the salt of the backend. func (b *backend) GenerateSaltedOTP(ctx context.Context) (string, string, error) { str, err := uuid.GenerateUUID() @@ -273,8 +319,12 @@ Creates a credential for establishing SSH connection with the remote host. const pathCredsCreateHelpDesc = ` This path will generate a new key for establishing SSH session with -target host. The key can be a One Time Password (OTP) using 'key_type' -being 'otp'. +target host. The key can either be a long lived dynamic key or a One +Time Password (OTP), using 'key_type' parameter being 'dynamic' or +'otp' respectively. For dynamic keys, a named key should be supplied. +Create named key using the 'keys/' endpoint, and this represents the +shared SSH key of target host. If this backend is mounted at 'ssh', +then "ssh/creds/web" would generate a key for 'web' role. Keys will have a lease associated with them. The access keys can be revoked by using the lease ID. diff --git a/builtin/logical/ssh/path_fetch.go b/builtin/logical/ssh/path_fetch.go index 3a1fa5f297d4e..a04481e45573d 100644 --- a/builtin/logical/ssh/path_fetch.go +++ b/builtin/logical/ssh/path_fetch.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -14,17 +11,12 @@ func pathFetchPublicKey(b *backend) *framework.Path { return &framework.Path{ Pattern: `public_key`, - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - OperationSuffix: "public-key", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathFetchPublicKey, }, HelpSynopsis: `Retrieve the public key.`, - HelpDescription: `This allows the public key of the SSH CA certificate that this backend has been configured with to be fetched. This is a raw response endpoint without JSON encoding; use -format=raw or an external tool (e.g., curl) to fetch this value.`, + HelpDescription: `This allows the public key, that this backend has been configured with, to be fetched.`, } } diff --git a/builtin/logical/ssh/path_issue.go b/builtin/logical/ssh/path_issue.go index b50e03ed77711..77b644590fd04 100644 --- a/builtin/logical/ssh/path_issue.go +++ b/builtin/logical/ssh/path_issue.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -22,12 +19,6 @@ func pathIssue(b *backend) *framework.Path { return &framework.Path{ Pattern: "issue/" + framework.GenericNameWithAtRegex("role"), - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - OperationVerb: "issue", - OperationSuffix: "certificate", - }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathIssue, diff --git a/builtin/logical/ssh/path_issue_sign.go b/builtin/logical/ssh/path_issue_sign.go index c4e68e4721f09..e6f225ffdc6ff 100644 --- a/builtin/logical/ssh/path_issue_sign.go +++ b/builtin/logical/ssh/path_issue_sign.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -96,7 +93,7 @@ func (b *backend) pathSignIssueCertificateHelper(ctx context.Context, req *logic return logical.ErrorResponse(err.Error()), nil } - extensions, addExtTemplatingWarning, err := b.calculateExtensions(data, req, role) + extensions, err := b.calculateExtensions(data, req, role) if err != nil { return logical.ErrorResponse(err.Error()), nil } @@ -143,10 +140,6 @@ func (b *backend) pathSignIssueCertificateHelper(ctx context.Context, req *logic }, } - if addExtTemplatingWarning { - response.AddWarning("default_extension templating enabled with at least one extension requiring identity templating. However, this request lacked identity entity information, causing one or more extensions to be skipped from the generated certificate.") - } - return response, nil } @@ -179,14 +172,18 @@ func (b *backend) calculateValidPrincipals(data *framework.FieldData, req *logic parsedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(validPrincipals, ","), false) // Build list of allowed Principals from template and static principalsAllowedByRole var allowedPrincipals []string - if enableTemplating { - rendered, err := b.renderPrincipal(principalsAllowedByRole, req) - if err != nil { - return nil, err + for _, principal := range strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false) { + if enableTemplating { + rendered, err := b.renderPrincipal(principal, req) + if err != nil { + return nil, err + } + // Template returned a principal + allowedPrincipals = append(allowedPrincipals, rendered) + } else { + // Static principal + allowedPrincipals = append(allowedPrincipals, principal) } - allowedPrincipals = strutil.RemoveDuplicates(strutil.ParseStringSlice(rendered, ","), false) - } else { - allowedPrincipals = strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false) } switch { @@ -303,7 +300,7 @@ func (b *backend) 
calculateCriticalOptions(data *framework.FieldData, role *sshR return criticalOptions, nil } -func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Request, role *sshRole) (map[string]string, bool, error) { +func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Request, role *sshRole) (map[string]string, error) { unparsedExtensions := data.Get("extensions").(map[string]interface{}) extensions := make(map[string]string) @@ -311,7 +308,7 @@ func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Re extensions := convertMapToStringValue(unparsedExtensions) if role.AllowedExtensions == "*" { // Allowed extensions was configured to allow all - return extensions, false, nil + return extensions, nil } notAllowed := []string{} @@ -323,13 +320,11 @@ func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Re } if len(notAllowed) != 0 { - return nil, false, fmt.Errorf("extensions %v are not on allowed list", notAllowed) + return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed) } - return extensions, false, nil + return extensions, nil } - haveMissingEntityInfoWithTemplatedExt := false - if role.DefaultExtensionsTemplate { for extensionKey, extensionValue := range role.DefaultExtensions { // Look for templating markers {{ .* }} @@ -342,10 +337,8 @@ func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Re // Template returned an extension value that we can use extensions[extensionKey] = templateExtensionValue } else { - return nil, false, fmt.Errorf("template '%s' could not be rendered -> %s", extensionValue, err) + return nil, fmt.Errorf("template '%s' could not be rendered -> %s", extensionValue, err) } - } else { - haveMissingEntityInfoWithTemplatedExt = true } } else { // Static extension value or err template @@ -356,7 +349,7 @@ func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Re extensions = 
role.DefaultExtensions } - return extensions, haveMissingEntityInfoWithTemplatedExt, nil + return extensions, nil } func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.Duration, error) { @@ -501,7 +494,7 @@ func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) { // prepare certificate for signing nonce := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return nil, fmt.Errorf("failed to generate signed SSH key: error generating random nonce: %w", err) + return nil, fmt.Errorf("failed to generate signed SSH key: error generating random nonce") } certificate := &ssh.Certificate{ Serial: serialNumber.Uint64(), diff --git a/builtin/logical/ssh/path_keys.go b/builtin/logical/ssh/path_keys.go new file mode 100644 index 0000000000000..6f0f7c9b2bdb0 --- /dev/null +++ b/builtin/logical/ssh/path_keys.go @@ -0,0 +1,110 @@ +package ssh + +import ( + "context" + "fmt" + + "golang.org/x/crypto/ssh" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +type sshHostKey struct { + Key string `json:"key"` +} + +func pathKeys(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameRegex("key_name"), + Fields: map[string]*framework.FieldSchema{ + "key_name": { + Type: framework.TypeString, + Description: "[Required] Name of the key", + }, + "key": { + Type: framework.TypeString, + Description: "[Required] SSH private key with super user privileges in host", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathKeysWrite, + logical.DeleteOperation: b.pathKeysDelete, + }, + HelpSynopsis: pathKeysSyn, + HelpDescription: pathKeysDesc, + } +} + +func (b *backend) getKey(ctx context.Context, s logical.Storage, n string) (*sshHostKey, error) { + entry, err := s.Get(ctx, "keys/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result sshHostKey 
+ if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + return &result, nil +} + +func (b *backend) pathKeysDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + keyName := d.Get("key_name").(string) + keyPath := fmt.Sprintf("keys/%s", keyName) + err := req.Storage.Delete(ctx, keyPath) + if err != nil { + return nil, err + } + return nil, nil +} + +func (b *backend) pathKeysWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + keyName := d.Get("key_name").(string) + if keyName == "" { + return logical.ErrorResponse("Missing key_name"), nil + } + + keyString := d.Get("key").(string) + + // Check if the key provided is infact a private key + signer, err := ssh.ParsePrivateKey([]byte(keyString)) + if err != nil || signer == nil { + return logical.ErrorResponse("Invalid key"), nil + } + + if keyString == "" { + return logical.ErrorResponse("Missing key"), nil + } + + keyPath := fmt.Sprintf("keys/%s", keyName) + + // Store the key + entry, err := logical.StorageEntryJSON(keyPath, map[string]interface{}{ + "key": keyString, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + return nil, nil +} + +const pathKeysSyn = ` +Register a shared private key with Vault. +` + +const pathKeysDesc = ` +Vault uses this key to install and uninstall dynamic keys in remote hosts. This +key should have sudoer privileges in remote hosts. This enables installing keys +for unprivileged usernames. + +If this backend is mounted as "ssh", then the endpoint for registering shared +key is "ssh/keys/". The name given here can be associated with any number +of roles via the endpoint "ssh/roles/". 
+` diff --git a/builtin/logical/ssh/path_lookup.go b/builtin/logical/ssh/path_lookup.go index 8ea0b53ac127c..05b62af96afd7 100644 --- a/builtin/logical/ssh/path_lookup.go +++ b/builtin/logical/ssh/path_lookup.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -15,24 +12,15 @@ import ( func pathLookup(b *backend) *framework.Path { return &framework.Path{ Pattern: "lookup", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - OperationVerb: "list", - OperationSuffix: "roles-by-ip", - }, - Fields: map[string]*framework.FieldSchema{ "ip": { Type: framework.TypeString, Description: "[Required] IP address of remote host", }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathLookupWrite, }, - HelpSynopsis: pathLookupSyn, HelpDescription: pathLookupDesc, } diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index b16c1d762cdec..595839301b86c 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -20,7 +17,7 @@ import ( const ( // KeyTypeOTP is an key of type OTP KeyTypeOTP = "otp" - // KeyTypeDynamic is dynamic key type; removed. + // KeyTypeDynamic is dynamic key type KeyTypeDynamic = "dynamic" // KeyTypeCA is an key of type CA KeyTypeCA = "ca" @@ -35,19 +32,24 @@ const ( ) // Structure that represents a role in SSH backend. This is a common role structure -// for both OTP and CA roles. Not all the fields are mandatory for both type. +// for both OTP and Dynamic roles. Not all the fields are mandatory for both type. // Some are applicable for one and not for other. It doesn't matter. 
type sshRole struct { KeyType string `mapstructure:"key_type" json:"key_type"` + KeyName string `mapstructure:"key" json:"key"` + KeyBits int `mapstructure:"key_bits" json:"key_bits"` + AdminUser string `mapstructure:"admin_user" json:"admin_user"` DefaultUser string `mapstructure:"default_user" json:"default_user"` DefaultUserTemplate bool `mapstructure:"default_user_template" json:"default_user_template"` CIDRList string `mapstructure:"cidr_list" json:"cidr_list"` ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"` Port int `mapstructure:"port" json:"port"` + InstallScript string `mapstructure:"install_script" json:"install_script"` AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"` AllowedUsersTemplate bool `mapstructure:"allowed_users_template" json:"allowed_users_template"` AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"` AllowedDomainsTemplate bool `mapstructure:"allowed_domains_template" json:"allowed_domains_template"` + KeyOptionSpecs string `mapstructure:"key_option_specs" json:"key_option_specs"` MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"` TTL string `mapstructure:"ttl" json:"ttl"` DefaultCriticalOptions map[string]string `mapstructure:"default_critical_options" json:"default_critical_options"` @@ -72,11 +74,6 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - OperationSuffix: "roles", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -89,12 +86,6 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameWithAtRegex("role"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - OperationSuffix: "role", - }, - Fields: 
map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -102,10 +93,30 @@ func pathRoles(b *backend) *framework.Path { [Required for all types] Name of the role being created.`, }, + "key": { + Type: framework.TypeString, + Description: ` + [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] + Name of the registered key in Vault. Before creating the role, use the + 'keys/' endpoint to create a named key.`, + }, + "admin_user": { + Type: framework.TypeString, + Description: ` + [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] + Admin user at remote host. The shared key being registered should be + for this user and should have root privileges. Everytime a dynamic + credential is being generated for other users, Vault uses this admin + username to login to remote host and install the generated credential + for the other user.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Admin Username", + }, + }, "default_user": { Type: framework.TypeString, Description: ` - [Required for OTP type] [Optional for CA type] + [Required for Dynamic type] [Required for OTP type] [Optional for CA type] Default username for which a credential will be generated. When the endpoint 'creds/' is used without a username, this value will be used as default username.`, @@ -116,7 +127,7 @@ func pathRoles(b *backend) *framework.Path { "default_user_template": { Type: framework.TypeBool, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, Default user can be specified using identity template policies. Non-templated users are also permitted. 
`, @@ -125,7 +136,7 @@ func pathRoles(b *backend) *framework.Path { "cidr_list": { Type: framework.TypeString, Description: ` - [Optional for OTP type] [Not applicable for CA type] + [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] Comma separated list of CIDR blocks for which the role is applicable for. CIDR blocks can belong to more than one role.`, DisplayAttrs: &framework.DisplayAttributes{ @@ -135,7 +146,7 @@ func pathRoles(b *backend) *framework.Path { "exclude_cidr_list": { Type: framework.TypeString, Description: ` - [Optional for OTP type] [Not applicable for CA type] + [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] Comma separated list of CIDR blocks. IP addresses belonging to these blocks are not accepted by the role. This is particularly useful when big CIDR blocks are being used by the role and certain parts of it needs to be kept out.`, @@ -146,7 +157,7 @@ func pathRoles(b *backend) *framework.Path { "port": { Type: framework.TypeInt, Description: ` - [Optional for OTP type] [Not applicable for CA type] + [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] Port number for SSH connection. Default is '22'. Port number does not play any role in creation of OTP. For 'otp' type, this is just a way to inform client about the port number to use. Port number will be @@ -159,13 +170,27 @@ func pathRoles(b *backend) *framework.Path { Type: framework.TypeString, Description: ` [Required for all types] - Type of key used to login to hosts. It can be either 'otp' or 'ca'. + Type of key used to login to hosts. It can be either 'otp', 'dynamic' or 'ca'. 
'otp' type requires agent to be installed in remote hosts.`, - AllowedValues: []interface{}{"otp", "ca"}, + AllowedValues: []interface{}{"otp", "dynamic", "ca"}, DisplayAttrs: &framework.DisplayAttributes{ Value: "ca", }, }, + "key_bits": { + Type: framework.TypeInt, + Description: ` + [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] + Length of the RSA dynamic key in bits. It is 1024 by default or it can be 2048.`, + }, + "install_script": { + Type: framework.TypeString, + Description: ` + [Optional for Dynamic type] [Not-applicable for OTP type] [Not applicable for CA type] + Script used to install and uninstall public keys in the target machine. + The inbuilt default install script will be for Linux hosts. For sample + script, refer the project documentation website.`, + }, "allowed_users": { Type: framework.TypeString, Description: ` @@ -185,7 +210,7 @@ func pathRoles(b *backend) *framework.Path { "allowed_users_template": { Type: framework.TypeBool, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, Allowed users can be specified using identity template policies. Non-templated users are also permitted. `, @@ -194,7 +219,7 @@ func pathRoles(b *backend) *framework.Path { "allowed_domains": { Type: framework.TypeString, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If this option is not specified, client can request for a signed certificate for any valid host. If only certain domains are allowed, then this list enforces it. 
`, @@ -202,16 +227,25 @@ func pathRoles(b *backend) *framework.Path { "allowed_domains_template": { Type: framework.TypeBool, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, Allowed domains can be specified using identity template policies. Non-templated domains are also permitted. `, Default: false, }, + "key_option_specs": { + Type: framework.TypeString, + Description: ` + [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] + Comma separated option specifications which will be prefixed to RSA key in + authorized_keys file. Options should be valid and comply with authorized_keys + file format and should not contain spaces. + `, + }, "ttl": { Type: framework.TypeDurationSecond, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] The lease duration if no specific lease duration is requested. The lease duration controls the expiration of certificates issued by this backend. Defaults to @@ -223,7 +257,7 @@ func pathRoles(b *backend) *framework.Path { "max_ttl": { Type: framework.TypeDurationSecond, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] The maximum allowed lease duration `, DisplayAttrs: &framework.DisplayAttributes{ @@ -233,7 +267,7 @@ func pathRoles(b *backend) *framework.Path { "allowed_critical_options": { Type: framework.TypeString, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] A comma-separated list of critical options that certificates can have when signed. To allow any critical options, set this to an empty string. 
`, @@ -241,7 +275,7 @@ func pathRoles(b *backend) *framework.Path { "allowed_extensions": { Type: framework.TypeString, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] A comma-separated list of extensions that certificates can have when signed. An empty list means that no extension overrides are allowed by an end-user; explicitly specify '*' to allow any extensions to be set. @@ -250,8 +284,8 @@ func pathRoles(b *backend) *framework.Path { "default_critical_options": { Type: framework.TypeMap, Description: ` - [Not applicable for OTP type] [Optional for CA type] - Critical options certificates should + [Not applicable for Dynamic type] [Not applicable for OTP type] + [Optional for CA type] Critical options certificates should have if none are provided when signing. This field takes in key value pairs in JSON format. Note that these are not restricted by "allowed_critical_options". Defaults to none. @@ -260,8 +294,8 @@ func pathRoles(b *backend) *framework.Path { "default_extensions": { Type: framework.TypeMap, Description: ` - [Not applicable for OTP type] [Optional for CA type] - Extensions certificates should have if + [Not applicable for Dynamic type] [Not applicable for OTP type] + [Optional for CA type] Extensions certificates should have if none are provided when signing. This field takes in key value pairs in JSON format. Note that these are not restricted by "allowed_extensions". Defaults to none. @@ -270,7 +304,7 @@ func pathRoles(b *backend) *framework.Path { "default_extensions_template": { Type: framework.TypeBool, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, Default extension values can be specified using identity template policies. Non-templated extension values are also permitted. 
`, @@ -279,7 +313,7 @@ func pathRoles(b *backend) *framework.Path { "allow_user_certificates": { Type: framework.TypeBool, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, certificates are allowed to be signed for use as a 'user'. `, Default: false, @@ -287,7 +321,7 @@ func pathRoles(b *backend) *framework.Path { "allow_host_certificates": { Type: framework.TypeBool, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, certificates are allowed to be signed for use as a 'host'. `, Default: false, @@ -295,7 +329,7 @@ func pathRoles(b *backend) *framework.Path { "allow_bare_domains": { Type: framework.TypeBool, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, host certificates that are requested are allowed to use the base domains listed in "allowed_domains", e.g. "example.com". This is a separate option as in some cases this can be considered a security threat. @@ -304,14 +338,14 @@ func pathRoles(b *backend) *framework.Path { "allow_subdomains": { Type: framework.TypeBool, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, host certificates that are requested are allowed to use subdomains of those listed in "allowed_domains". `, }, "allow_user_key_ids": { Type: framework.TypeBool, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If true, users can override the key ID for a signed certificate with the "key_id" field. When false, the key ID will always be the token display name. 
The key ID is logged by the SSH server and can be useful for auditing. @@ -323,7 +357,7 @@ func pathRoles(b *backend) *framework.Path { "key_id_format": { Type: framework.TypeString, Description: ` - [Not applicable for OTP type] [Optional for CA type] + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] When supplied, this value specifies a custom format for the key id of a signed certificate. The following variables are available for use: '{{token_display_name}}' - The display name of the token used to make the request. '{{role_name}}' - The name of the role signing the request. @@ -336,18 +370,16 @@ func pathRoles(b *backend) *framework.Path { "allowed_user_key_lengths": { Type: framework.TypeMap, Description: ` - [Not applicable for OTP type] [Optional for CA type] - If set, allows the enforcement of key types and minimum key sizes to be signed. - `, + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + If set, allows the enforcement of key types and minimum key sizes to be signed. + `, }, "algorithm_signer": { Type: framework.TypeString, Description: ` - [Not applicable for OTP type] [Optional for CA type] When supplied, this value specifies a signing algorithm for the key. Possible values: ssh-rsa, rsa-sha2-256, rsa-sha2-512, default, or the empty string. 
`, - AllowedValues: []interface{}{"", DefaultAlgorithmSigner, ssh.SigAlgoRSA, ssh.SigAlgoRSASHA2256, ssh.SigAlgoRSASHA2512}, DisplayAttrs: &framework.DisplayAttributes{ Name: "Signing Algorithm", }, @@ -356,7 +388,6 @@ func pathRoles(b *backend) *framework.Path { Type: framework.TypeDurationSecond, Default: 30, Description: ` - [Not applicable for OTP type] [Optional for CA type] The duration that the SSH certificate should be backdated by at issuance.`, DisplayAttrs: &framework.DisplayAttributes{ Name: "Not before duration", @@ -382,7 +413,7 @@ func (b *backend) pathRoleWrite(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse("missing role name"), nil } - // Allowed users is an optional field, applicable for both OTP and CA types. + // Allowed users is an optional field, applicable for both OTP and Dynamic types. allowedUsers := d.Get("allowed_users").(string) // Validate the CIDR blocks @@ -427,6 +458,13 @@ func (b *backend) pathRoleWrite(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse("missing default user"), nil } + // Admin user is not used if OTP key type is used because there is + // no need to login to remote machine. + adminUser := d.Get("admin_user").(string) + if adminUser != "" { + return logical.ErrorResponse("admin user not required for OTP type"), nil + } + // Below are the only fields used from the role structure for OTP type. roleEntry = sshRole{ DefaultUser: defaultUser, @@ -438,7 +476,59 @@ func (b *backend) pathRoleWrite(ctx context.Context, req *logical.Request, d *fr Version: roleEntryVersion, } } else if keyType == KeyTypeDynamic { - return logical.ErrorResponse("dynamic key type roles are no longer supported"), nil + defaultUser := d.Get("default_user").(string) + if defaultUser == "" { + return logical.ErrorResponse("missing default user"), nil + } + // Key name is required by dynamic type and not by OTP type. 
+ keyName := d.Get("key").(string) + if keyName == "" { + return logical.ErrorResponse("missing key name"), nil + } + keyEntry, err := req.Storage.Get(ctx, fmt.Sprintf("keys/%s", keyName)) + if err != nil || keyEntry == nil { + return logical.ErrorResponse(fmt.Sprintf("invalid 'key': %q", keyName)), nil + } + + installScript := d.Get("install_script").(string) + keyOptionSpecs := d.Get("key_option_specs").(string) + + // Setting the default script here. The script will install the + // generated public key in the authorized_keys file of linux host. + if installScript == "" { + installScript = DefaultPublicKeyInstallScript + } + + adminUser := d.Get("admin_user").(string) + if adminUser == "" { + return logical.ErrorResponse("missing admin username"), nil + } + + // This defaults to 2048, but it can also be 1024, 3072, 4096, or 8192. + // In the near future, we should disallow 1024-bit SSH keys. + keyBits := d.Get("key_bits").(int) + if keyBits == 0 { + keyBits = 2048 + } + if keyBits != 1024 && keyBits != 2048 && keyBits != 3072 && keyBits != 4096 && keyBits != 8192 { + return logical.ErrorResponse("invalid key_bits field"), nil + } + + // Store all the fields required by dynamic key type + roleEntry = sshRole{ + KeyName: keyName, + AdminUser: adminUser, + DefaultUser: defaultUser, + CIDRList: cidrList, + ExcludeCIDRList: excludeCidrList, + Port: port, + KeyType: KeyTypeDynamic, + KeyBits: keyBits, + InstallScript: installScript, + AllowedUsers: allowedUsers, + KeyOptionSpecs: keyOptionSpecs, + Version: roleEntryVersion, + } } else if keyType == KeyTypeCA { algorithmSigner := DefaultAlgorithmSigner algorithmSignerRaw, ok := d.GetOk("algorithm_signer") @@ -685,6 +775,7 @@ func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) { "allow_user_key_ids": role.AllowUserKeyIDs, "key_id_format": role.KeyIDFormat, "key_type": role.KeyType, + "key_bits": role.KeyBits, "default_critical_options": role.DefaultCriticalOptions, "default_extensions": 
role.DefaultExtensions, "default_extensions_template": role.DefaultExtensionsTemplate, @@ -693,7 +784,23 @@ func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) { "not_before_duration": int64(role.NotBeforeDuration.Seconds()), } case KeyTypeDynamic: - return nil, fmt.Errorf("dynamic key type roles are no longer supported") + result = map[string]interface{}{ + "key": role.KeyName, + "admin_user": role.AdminUser, + "default_user": role.DefaultUser, + "cidr_list": role.CIDRList, + "exclude_cidr_list": role.ExcludeCIDRList, + "port": role.Port, + "key_type": role.KeyType, + "key_bits": role.KeyBits, + "allowed_users": role.AllowedUsers, + "key_option_specs": role.KeyOptionSpecs, + // Returning install script will make the output look messy. + // But this is one way for clients to see the script that is + // being used to install the key. If there is some problem, + // the script can be modified and configured by clients. + "install_script": role.InstallScript, + } default: return nil, fmt.Errorf("invalid key type: %v", role.KeyType) } diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go index 36971ebf4933d..19196013e6d50 100644 --- a/builtin/logical/ssh/path_sign.go +++ b/builtin/logical/ssh/path_sign.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -15,12 +12,6 @@ func pathSign(b *backend) *framework.Path { return &framework.Path{ Pattern: "sign/" + framework.GenericNameWithAtRegex("role"), - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - OperationVerb: "sign", - OperationSuffix: "certificate", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathSign, }, diff --git a/builtin/logical/ssh/path_verify.go b/builtin/logical/ssh/path_verify.go index 906272281e568..7d9814751fd12 100644 --- a/builtin/logical/ssh/path_verify.go +++ b/builtin/logical/ssh/path_verify.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ssh import ( @@ -14,11 +11,6 @@ import ( func pathVerify(b *backend) *framework.Path { return &framework.Path{ Pattern: "verify", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixSSH, - OperationVerb: "verify", - OperationSuffix: "otp", - }, Fields: map[string]*framework.FieldSchema{ "otp": { Type: framework.TypeString, diff --git a/builtin/logical/ssh/secret_dynamic_key.go b/builtin/logical/ssh/secret_dynamic_key.go new file mode 100644 index 0000000000000..80b9c5ca0e7c2 --- /dev/null +++ b/builtin/logical/ssh/secret_dynamic_key.go @@ -0,0 +1,70 @@ +package ssh + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +const SecretDynamicKeyType = "secret_dynamic_key_type" + +func secretDynamicKey(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretDynamicKeyType, + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "Username in host", + }, + "ip": { + Type: framework.TypeString, + Description: "IP address of host", + }, + }, + + Renew: b.secretDynamicKeyRenew, + Revoke: b.secretDynamicKeyRevoke, + } +} + 
+func (b *backend) secretDynamicKeyRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return &logical.Response{Secret: req.Secret}, nil +} + +func (b *backend) secretDynamicKeyRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + type sec struct { + AdminUser string `mapstructure:"admin_user"` + Username string `mapstructure:"username"` + IP string `mapstructure:"ip"` + HostKeyName string `mapstructure:"host_key_name"` + DynamicPublicKey string `mapstructure:"dynamic_public_key"` + InstallScript string `mapstructure:"install_script"` + Port int `mapstructure:"port"` + } + + intSec := &sec{} + err := mapstructure.Decode(req.Secret.InternalData, intSec) + if err != nil { + return nil, fmt.Errorf("secret internal data could not be decoded: %w", err) + } + + // Fetch the host key using the key name + hostKey, err := b.getKey(ctx, req.Storage, intSec.HostKeyName) + if err != nil { + return nil, fmt.Errorf("key %q not found error: %w", intSec.HostKeyName, err) + } + if hostKey == nil { + return nil, fmt.Errorf("key %q not found", intSec.HostKeyName) + } + + // Remove the public key from authorized_keys file in target machine + // The last param 'false' indicates that the key should be uninstalled. + err = b.installPublicKeyInTarget(ctx, intSec.AdminUser, intSec.Username, intSec.IP, intSec.Port, hostKey.Key, intSec.DynamicPublicKey, intSec.InstallScript, false) + if err != nil { + return nil, fmt.Errorf("error removing public key from authorized_keys file in target") + } + return nil, nil +} diff --git a/builtin/logical/ssh/secret_otp.go b/builtin/logical/ssh/secret_otp.go index a70cf601cfe50..72e9903f16bb1 100644 --- a/builtin/logical/ssh/secret_otp.go +++ b/builtin/logical/ssh/secret_otp.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ssh import ( diff --git a/builtin/logical/ssh/util.go b/builtin/logical/ssh/util.go index b886750679076..1923caa346b3d 100644 --- a/builtin/logical/ssh/util.go +++ b/builtin/logical/ssh/util.go @@ -1,9 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ssh import ( + "bytes" "context" "crypto/rand" "crypto/rsa" @@ -13,7 +11,9 @@ import ( "fmt" "net" "strings" + "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/logical" "golang.org/x/crypto/ssh" @@ -40,6 +40,69 @@ func generateRSAKeys(keyBits int) (publicKeyRsa string, privateKeyRsa string, er return } +// Public key and the script to install the key are uploaded to remote machine. +// Public key is either added or removed from authorized_keys file using the +// script. Default script is for a Linux machine and hence the path of the +// authorized_keys file is hard coded to resemble Linux. +// +// The last param 'install' if false, uninstalls the key. +func (b *backend) installPublicKeyInTarget(ctx context.Context, adminUser, username, ip string, port int, hostkey, dynamicPublicKey, installScript string, install bool) error { + // Transfer the newly generated public key to remote host under a random + // file name. This is to avoid name collisions from other requests. + _, publicKeyFileName, err := b.GenerateSaltedOTP(ctx) + if err != nil { + return err + } + + comm, err := createSSHComm(b.Logger(), adminUser, ip, port, hostkey) + if err != nil { + return err + } + defer comm.Close() + + err = comm.Upload(publicKeyFileName, bytes.NewBufferString(dynamicPublicKey), nil) + if err != nil { + return fmt.Errorf("error uploading public key: %w", err) + } + + // Transfer the script required to install or uninstall the key to the remote + // host under a random file name as well. This is to avoid name collisions + // from other requests. 
+ scriptFileName := fmt.Sprintf("%s.sh", publicKeyFileName) + err = comm.Upload(scriptFileName, bytes.NewBufferString(installScript), nil) + if err != nil { + return fmt.Errorf("error uploading install script: %w", err) + } + + // Create a session to run remote command that triggers the script to install + // or uninstall the key. + session, err := comm.NewSession() + if err != nil { + return fmt.Errorf("unable to create SSH Session using public keys: %w", err) + } + if session == nil { + return fmt.Errorf("invalid session object") + } + defer session.Close() + + authKeysFileName := fmt.Sprintf("/home/%s/.ssh/authorized_keys", username) + + var installOption string + if install { + installOption = "install" + } else { + installOption = "uninstall" + } + + // Give execute permissions to install script, run and delete it. + chmodCmd := fmt.Sprintf("chmod +x %s", scriptFileName) + scriptCmd := fmt.Sprintf("./%s %s %s %s", scriptFileName, installOption, publicKeyFileName, authKeysFileName) + rmCmd := fmt.Sprintf("rm -f %s", scriptFileName) + targetCmd := fmt.Sprintf("%s;%s;%s", chmodCmd, scriptCmd, rmCmd) + + return session.Run(targetCmd) +} + // Takes an IP address and role name and checks if the IP is part // of CIDR blocks belonging to the role. 
func roleContainsIP(ctx context.Context, s logical.Storage, roleName string, ip string) (bool, error) { @@ -89,6 +152,52 @@ func cidrListContainsIP(ip, cidrList string) (bool, error) { return false, nil } +func insecureIgnoreHostWarning(logger log.Logger) ssh.HostKeyCallback { + return func(hostname string, remote net.Addr, key ssh.PublicKey) error { + logger.Warn("cannot verify server key: host key validation disabled") + return nil + } +} + +func createSSHComm(logger log.Logger, username, ip string, port int, hostkey string) (*comm, error) { + signer, err := ssh.ParsePrivateKey([]byte(hostkey)) + if err != nil { + return nil, err + } + + clientConfig := &ssh.ClientConfig{ + User: username, + Auth: []ssh.AuthMethod{ + ssh.PublicKeys(signer), + }, + HostKeyCallback: insecureIgnoreHostWarning(logger), + Timeout: 1 * time.Minute, + } + + connfunc := func() (net.Conn, error) { + c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ip, port), 15*time.Second) + if err != nil { + return nil, err + } + + if tcpConn, ok := c.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(5 * time.Second) + } + + return c, nil + } + config := &SSHCommConfig{ + SSHConfig: clientConfig, + Connection: connfunc, + Pty: false, + DisableAgent: true, + Logger: logger, + } + + return SSHCommNew(fmt.Sprintf("%s:%d", ip, port), config) +} + func parsePublicSSHKey(key string) (ssh.PublicKey, error) { keyParts := strings.Split(key, " ") if len(keyParts) > 1 { diff --git a/builtin/logical/totp/backend.go b/builtin/logical/totp/backend.go index 5f0cb52c51752..d2494b499549c 100644 --- a/builtin/logical/totp/backend.go +++ b/builtin/logical/totp/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package totp import ( @@ -13,8 +10,6 @@ import ( cache "github.com/patrickmn/go-cache" ) -const operationPrefixTOTP = "totp" - func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { diff --git a/builtin/logical/totp/backend_test.go b/builtin/logical/totp/backend_test.go index 12600427f27f0..0b68599df64c7 100644 --- a/builtin/logical/totp/backend_test.go +++ b/builtin/logical/totp/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package totp import ( diff --git a/builtin/logical/totp/cmd/totp/main.go b/builtin/logical/totp/cmd/totp/main.go index 9a2a49bd27551..4c96df7f31466 100644 --- a/builtin/logical/totp/cmd/totp/main.go +++ b/builtin/logical/totp/cmd/totp/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: totp.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/totp/path_code.go b/builtin/logical/totp/path_code.go index c792a2905ef01..af56f37da689d 100644 --- a/builtin/logical/totp/path_code.go +++ b/builtin/logical/totp/path_code.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package totp import ( @@ -17,12 +14,6 @@ import ( func pathCode(b *backend) *framework.Path { return &framework.Path{ Pattern: "code/" + framework.GenericNameWithAtRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTOTP, - OperationSuffix: "code", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -34,19 +25,9 @@ func pathCode(b *backend) *framework.Path { }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathReadCode, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "generate", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathValidateCode, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "validate", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathReadCode, + logical.UpdateOperation: b.pathValidateCode, }, HelpSynopsis: pathCodeHelpSyn, diff --git a/builtin/logical/totp/path_keys.go b/builtin/logical/totp/path_keys.go index 05e8e5aecf5b5..d7f7f2abe323b 100644 --- a/builtin/logical/totp/path_keys.go +++ b/builtin/logical/totp/path_keys.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package totp import ( @@ -24,11 +21,6 @@ func pathListKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTOTP, - OperationSuffix: "keys", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathKeyList, }, @@ -41,12 +33,6 @@ func pathListKeys(b *backend) *framework.Path { func pathKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameWithAtRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTOTP, - OperationSuffix: "key", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -122,25 +108,10 @@ func pathKeys(b *backend) *framework.Path { }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathKeyRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "read", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathKeyCreate, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "create", - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathKeyDelete, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "delete", - }, - }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathKeyRead, + logical.UpdateOperation: b.pathKeyCreate, + logical.DeleteOperation: b.pathKeyDelete, }, HelpSynopsis: pathKeyHelpSyn, diff --git a/builtin/logical/transit/backend.go b/builtin/logical/transit/backend.go index 03c3b2fda4e42..391b392c69b0c 100644 --- a/builtin/logical/transit/backend.go +++ b/builtin/logical/transit/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -19,12 +16,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const ( - operationPrefixTransit = "transit" - - // Minimum cache size for transit backend - minCacheSize = 10 -) +// Minimum cache size for transit backend +const minCacheSize = 10 func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b, err := Backend(ctx, conf) @@ -50,6 +43,7 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) Paths: []*framework.Path{ // Rotate/Config needs to come before Keys // as the handler is greedy + b.pathConfig(), b.pathRotate(), b.pathRewrap(), b.pathWrappingKey(), @@ -57,9 +51,7 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) b.pathImportVersion(), b.pathKeys(), b.pathListKeys(), - b.pathBYOKExportKeys(), b.pathExportKeys(), - b.pathKeysConfig(), b.pathEncrypt(), b.pathDecrypt(), b.pathDatakey(), @@ -72,7 +64,6 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) b.pathRestore(), b.pathTrim(), b.pathCacheConfig(), - b.pathConfigKeys(), }, Secrets: []*framework.Secret{}, @@ -81,8 +72,6 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) PeriodicFunc: b.periodicFunc, } - b.backendUUID = conf.BackendUUID - // determine cacheSize to use. Defaults to 0 which means unlimited cacheSize := 0 useCache := !conf.System.CachingDisabled() @@ -116,7 +105,6 @@ type backend struct { cacheSizeChanged bool checkAutoRotateAfter time.Time autoRotateOnce sync.Once - backendUUID string } func GetCacheSizeFromStorage(ctx context.Context, s logical.Storage) (int, error) { diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go index d23c19465ff7f..c4d92a3ddbf57 100644 --- a/builtin/logical/transit/backend_test.go +++ b/builtin/logical/transit/backend_test.go @@ -1,18 +1,10 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( "context" - "crypto" - "crypto/ed25519" cryptoRand "crypto/rand" - "crypto/x509" "encoding/base64" - "encoding/pem" "fmt" - "io" "math/rand" "os" "path" @@ -23,20 +15,13 @@ import ( "testing" "time" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/logical/pki" + uuid "github.com/hashicorp/go-uuid" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/keysutil" "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - - uuid "github.com/hashicorp/go-uuid" "github.com/mitchellh/mapstructure" - - "github.com/stretchr/testify/require" ) const ( @@ -304,35 +289,6 @@ func testTransit_RSA(t *testing.T, keyType string) { if !resp.Data["valid"].(bool) { t.Fatalf("failed to verify the RSA signature") } - - // Take a random hash and sign it using PKCSv1_5_NoOID. 
- hash := "P8m2iUWdc4+MiKOkiqnjNUIBa3pAUuABqqU2/KdIE8s=" - signReq.Data = map[string]interface{}{ - "input": hash, - "hash_algorithm": "none", - "signature_algorithm": "pkcs1v15", - "prehashed": true, - } - resp, err = b.HandleRequest(context.Background(), signReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } - signature = resp.Data["signature"].(string) - - verifyReq.Data = map[string]interface{}{ - "input": hash, - "signature": signature, - "hash_algorithm": "none", - "signature_algorithm": "pkcs1v15", - "prehashed": true, - } - resp, err = b.HandleRequest(context.Background(), verifyReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } - if !resp.Data["valid"].(bool) { - t.Fatalf("failed to verify the RSA signature") - } } func TestBackend_basic(t *testing.T) { @@ -1072,7 +1028,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT if err != nil { t.Fatal(err) } - require.NotNil(t, resp, "expected populated request") + if resp != nil { + t.Fatal("expected nil response") + } p, err := keysutil.LoadPolicy(context.Background(), storage, path.Join("policy", "testkey")) if err != nil { @@ -1526,8 +1484,8 @@ func testPolicyFuzzingCommon(t *testing.T, be *backend) { // keys start at version 1 so we want [1, latestVersion] not [0, latestVersion) setVersion := (rand.Int() % latestVersion) + 1 fd.Raw["min_decryption_version"] = setVersion - fd.Schema = be.pathKeysConfig().Fields - resp, err = be.pathKeysConfigWrite(context.Background(), req, fd) + fd.Schema = be.pathConfig().Fields + resp, err = be.pathConfigWrite(context.Background(), req, fd) if err != nil { t.Errorf("got an error setting min decryption version: %v", err) } @@ -1558,7 +1516,9 @@ func TestBadInput(t *testing.T) { if err != nil { t.Fatal(err) } - require.NotNil(t, resp, "expected populated request") + if resp != nil { + t.Fatal("expected nil response") + } 
req.Path = "decrypt/test" req.Data = map[string]interface{}{ @@ -1647,7 +1607,9 @@ func TestTransit_AutoRotateKeys(t *testing.T) { if err != nil { t.Fatal(err) } - require.NotNil(t, resp, "expected populated request") + if resp != nil { + t.Fatal("expected nil response") + } // Write a key with an auto rotate value one day in the future req = &logical.Request{ @@ -1662,7 +1624,9 @@ func TestTransit_AutoRotateKeys(t *testing.T) { if err != nil { t.Fatal(err) } - require.NotNil(t, resp, "expected populated request") + if resp != nil { + t.Fatal("expected nil response") + } // Run the rotation check and ensure none of the keys have rotated b.checkAutoRotateAfter = time.Now() @@ -1753,546 +1717,3 @@ func TestTransit_AutoRotateKeys(t *testing.T) { ) } } - -func TestTransit_AEAD(t *testing.T) { - testTransit_AEAD(t, "aes128-gcm96") - testTransit_AEAD(t, "aes256-gcm96") - testTransit_AEAD(t, "chacha20-poly1305") -} - -func testTransit_AEAD(t *testing.T, keyType string) { - var resp *logical.Response - var err error - b, storage := createBackendWithStorage(t) - - keyReq := &logical.Request{ - Path: "keys/aead", - Operation: logical.UpdateOperation, - Data: map[string]interface{}{ - "type": keyType, - }, - Storage: storage, - } - - resp, err = b.HandleRequest(context.Background(), keyReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } - - plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" - associated := "U3BoaW54IG9mIGJsYWNrIHF1YXJ0eiwganVkZ2UgbXkgdm93Lgo=" // "Sphinx of black quartz, judge my vow." - - // Basic encrypt/decrypt should work. 
- encryptReq := &logical.Request{ - Path: "encrypt/aead", - Operation: logical.UpdateOperation, - Storage: storage, - Data: map[string]interface{}{ - "plaintext": plaintext, - }, - } - - resp, err = b.HandleRequest(context.Background(), encryptReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } - - ciphertext1 := resp.Data["ciphertext"].(string) - - decryptReq := &logical.Request{ - Path: "decrypt/aead", - Operation: logical.UpdateOperation, - Storage: storage, - Data: map[string]interface{}{ - "ciphertext": ciphertext1, - }, - } - - resp, err = b.HandleRequest(context.Background(), decryptReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } - - decryptedPlaintext := resp.Data["plaintext"] - - if plaintext != decryptedPlaintext { - t.Fatalf("bad: plaintext; expected: %q\nactual: %q", plaintext, decryptedPlaintext) - } - - // Using associated as ciphertext should fail. - decryptReq.Data["ciphertext"] = associated - resp, err = b.HandleRequest(context.Background(), decryptReq) - if err == nil || (resp != nil && !resp.IsError()) { - t.Fatalf("bad expected error: err: %v\nresp: %#v", err, resp) - } - - // Redoing the above with additional data should work. 
- encryptReq.Data["associated_data"] = associated - resp, err = b.HandleRequest(context.Background(), encryptReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } - - ciphertext2 := resp.Data["ciphertext"].(string) - decryptReq.Data["ciphertext"] = ciphertext2 - decryptReq.Data["associated_data"] = associated - - resp, err = b.HandleRequest(context.Background(), decryptReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } - - decryptedPlaintext = resp.Data["plaintext"] - if plaintext != decryptedPlaintext { - t.Fatalf("bad: plaintext; expected: %q\nactual: %q", plaintext, decryptedPlaintext) - } - - // Removing the associated_data should break the decryption. - decryptReq.Data = map[string]interface{}{ - "ciphertext": ciphertext2, - } - resp, err = b.HandleRequest(context.Background(), decryptReq) - if err == nil || (resp != nil && !resp.IsError()) { - t.Fatalf("bad expected error: err: %v\nresp: %#v", err, resp) - } - - // Using a valid ciphertext with associated_data should also break the - // decryption. - decryptReq.Data["ciphertext"] = ciphertext1 - decryptReq.Data["associated_data"] = associated - resp, err = b.HandleRequest(context.Background(), decryptReq) - if err == nil || (resp != nil && !resp.IsError()) { - t.Fatalf("bad expected error: err: %v\nresp: %#v", err, resp) - } -} - -// Hack: use Transit as a signer. 
-type transitKey struct { - public any - mount string - name string - t *testing.T - client *api.Client -} - -func (k *transitKey) Public() crypto.PublicKey { - return k.public -} - -func (k *transitKey) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { - hash := opts.(crypto.Hash) - if hash.String() != "SHA-256" { - return nil, fmt.Errorf("unknown hash algorithm: %v", opts) - } - - resp, err := k.client.Logical().Write(k.mount+"/sign/"+k.name, map[string]interface{}{ - "hash_algorithm": "sha2-256", - "input": base64.StdEncoding.EncodeToString(digest), - "prehashed": true, - "signature_algorithm": "pkcs1v15", - }) - if err != nil { - return nil, fmt.Errorf("failed to sign data: %w", err) - } - require.NotNil(k.t, resp) - require.NotNil(k.t, resp.Data) - require.NotNil(k.t, resp.Data["signature"]) - rawSig := resp.Data["signature"].(string) - sigParts := strings.Split(rawSig, ":") - - decoded, err := base64.StdEncoding.DecodeString(sigParts[2]) - if err != nil { - return nil, fmt.Errorf("failed to decode signature (%v): %w", rawSig, err) - } - - return decoded, nil -} - -func TestTransitPKICSR(t *testing.T) { - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "transit": Factory, - "pki": pki.Factory, - }, - } - - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - - vault.TestWaitActive(t, cores[0].Core) - - client := cores[0].Client - - // Mount transit, write a key. 
- err := client.Sys().Mount("transit", &api.MountInput{ - Type: "transit", - }) - require.NoError(t, err) - - _, err = client.Logical().Write("transit/keys/leaf", map[string]interface{}{ - "type": "rsa-2048", - }) - require.NoError(t, err) - - resp, err := client.Logical().Read("transit/keys/leaf") - require.NoError(t, err) - require.NotNil(t, resp) - - keys := resp.Data["keys"].(map[string]interface{}) - require.NotNil(t, keys) - keyData := keys["1"].(map[string]interface{}) - require.NotNil(t, keyData) - keyPublic := keyData["public_key"].(string) - require.NotEmpty(t, keyPublic) - - pemBlock, _ := pem.Decode([]byte(keyPublic)) - require.NotNil(t, pemBlock) - pubKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - require.NoError(t, err) - require.NotNil(t, pubKey) - - // Setup a new CSR... - var reqTemplate x509.CertificateRequest - reqTemplate.PublicKey = pubKey - reqTemplate.PublicKeyAlgorithm = x509.RSA - reqTemplate.Subject.CommonName = "dadgarcorp.com" - - var k transitKey - k.public = pubKey - k.mount = "transit" - k.name = "leaf" - k.t = t - k.client = client - - req, err := x509.CreateCertificateRequest(cryptoRand.Reader, &reqTemplate, &k) - require.NoError(t, err) - require.NotNil(t, req) - - reqPEM := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: req, - }) - t.Logf("csr: %v", string(reqPEM)) - - // Mount PKI, generate a root, sign this CSR. 
- err = client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - }) - require.NoError(t, err) - - resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "common_name": "PKI Root X1", - }) - require.NoError(t, err) - require.NotNil(t, resp) - rootCertPEM := resp.Data["certificate"].(string) - - pemBlock, _ = pem.Decode([]byte(rootCertPEM)) - require.NotNil(t, pemBlock) - - rootCert, err := x509.ParseCertificate(pemBlock.Bytes) - require.NoError(t, err) - - resp, err = client.Logical().Write("pki/issuer/default/sign-verbatim", map[string]interface{}{ - "csr": string(reqPEM), - "ttl": "10m", - }) - require.NoError(t, err) - require.NotNil(t, resp) - - leafCertPEM := resp.Data["certificate"].(string) - pemBlock, _ = pem.Decode([]byte(leafCertPEM)) - require.NotNil(t, pemBlock) - - leafCert, err := x509.ParseCertificate(pemBlock.Bytes) - require.NoError(t, err) - require.NoError(t, leafCert.CheckSignatureFrom(rootCert)) - t.Logf("root: %v", rootCertPEM) - t.Logf("leaf: %v", leafCertPEM) -} - -func TestTransit_ReadPublicKeyImported(t *testing.T) { - testTransit_ReadPublicKeyImported(t, "rsa-2048") - testTransit_ReadPublicKeyImported(t, "ecdsa-p256") - testTransit_ReadPublicKeyImported(t, "ed25519") -} - -func testTransit_ReadPublicKeyImported(t *testing.T, keyType string) { - generateKeys(t) - b, s := createBackendWithStorage(t) - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get key - privateKey := getKey(t, keyType) - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatalf("failed to extract the public key: %s", err) - } - - // Import key - importReq := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - "type": keyType, - }, - } - importResp, err := b.HandleRequest(context.Background(), importReq) - if err 
!= nil || (importResp != nil && importResp.IsError()) { - t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importResp) - } - - // Read key - readReq := &logical.Request{ - Operation: logical.ReadOperation, - Path: "keys/" + keyID, - Storage: s, - } - - readResp, err := b.HandleRequest(context.Background(), readReq) - if err != nil || (readResp != nil && readResp.IsError()) { - t.Fatalf("failed to read key. err: %s\nresp: %#v", err, readResp) - } -} - -func TestTransit_SignWithImportedPublicKey(t *testing.T) { - testTransit_SignWithImportedPublicKey(t, "rsa-2048") - testTransit_SignWithImportedPublicKey(t, "ecdsa-p256") - testTransit_SignWithImportedPublicKey(t, "ed25519") -} - -func testTransit_SignWithImportedPublicKey(t *testing.T, keyType string) { - generateKeys(t) - b, s := createBackendWithStorage(t) - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get key - privateKey := getKey(t, keyType) - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatalf("failed to extract the public key: %s", err) - } - - // Import key - importReq := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - "type": keyType, - }, - } - importResp, err := b.HandleRequest(context.Background(), importReq) - if err != nil || (importResp != nil && importResp.IsError()) { - t.Fatalf("failed to import public key. 
err: %s\nresp: %#v", err, importResp) - } - - // Sign text - signReq := &logical.Request{ - Path: "sign/" + keyID, - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "plaintext": base64.StdEncoding.EncodeToString([]byte(testPlaintext)), - }, - } - - _, err = b.HandleRequest(context.Background(), signReq) - if err == nil { - t.Fatalf("expected error, should have failed to sign input") - } -} - -func TestTransit_VerifyWithImportedPublicKey(t *testing.T) { - generateKeys(t) - keyType := "rsa-2048" - b, s := createBackendWithStorage(t) - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get key - privateKey := getKey(t, keyType) - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatal(err) - } - - // Retrieve public wrapping key - wrappingKey, err := b.getWrappingKey(context.Background(), s) - if err != nil || wrappingKey == nil { - t.Fatalf("failed to retrieve public wrapping key: %s", err) - } - - privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey - pubWrappingKey := &privWrappingKey.PublicKey - - // generate ciphertext - importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") - - // Import private key - importReq := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "ciphertext": importBlob, - "type": keyType, - }, - } - importResp, err := b.HandleRequest(context.Background(), importReq) - if err != nil || (importResp != nil && importResp.IsError()) { - t.Fatalf("failed to import key. 
err: %s\nresp: %#v", err, importResp) - } - - // Sign text - signReq := &logical.Request{ - Storage: s, - Path: "sign/" + keyID, - Operation: logical.UpdateOperation, - Data: map[string]interface{}{ - "plaintext": base64.StdEncoding.EncodeToString([]byte(testPlaintext)), - }, - } - - signResp, err := b.HandleRequest(context.Background(), signReq) - if err != nil || (signResp != nil && signResp.IsError()) { - t.Fatalf("failed to sign plaintext. err: %s\nresp: %#v", err, signResp) - } - - // Get signature - signature := signResp.Data["signature"].(string) - - // Import new key as public key - importPubReq := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", "public-key-rsa"), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - "type": keyType, - }, - } - importPubResp, err := b.HandleRequest(context.Background(), importPubReq) - if err != nil || (importPubResp != nil && importPubResp.IsError()) { - t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importPubResp) - } - - // Verify signed text - verifyReq := &logical.Request{ - Path: "verify/public-key-rsa", - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "input": base64.StdEncoding.EncodeToString([]byte(testPlaintext)), - "signature": signature, - }, - } - - verifyResp, err := b.HandleRequest(context.Background(), verifyReq) - if err != nil || (importResp != nil && verifyResp.IsError()) { - t.Fatalf("failed to verify signed data. 
err: %s\nresp: %#v", err, importResp) - } -} - -func TestTransit_ExportPublicKeyImported(t *testing.T) { - testTransit_ExportPublicKeyImported(t, "rsa-2048") - testTransit_ExportPublicKeyImported(t, "ecdsa-p256") - testTransit_ExportPublicKeyImported(t, "ed25519") -} - -func testTransit_ExportPublicKeyImported(t *testing.T, keyType string) { - generateKeys(t) - b, s := createBackendWithStorage(t) - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get key - privateKey := getKey(t, keyType) - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatalf("failed to extract the public key: %s", err) - } - - t.Logf("generated key: %v", string(publicKeyBytes)) - - // Import key - importReq := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - "type": keyType, - "exportable": true, - }, - } - importResp, err := b.HandleRequest(context.Background(), importReq) - if err != nil || (importResp != nil && importResp.IsError()) { - t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importResp) - } - - t.Logf("importing key: %v", importResp) - - // Export key - exportReq := &logical.Request{ - Operation: logical.ReadOperation, - Path: fmt.Sprintf("export/public-key/%s/latest", keyID), - Storage: s, - } - - exportResp, err := b.HandleRequest(context.Background(), exportReq) - if err != nil || (exportResp != nil && exportResp.IsError()) { - t.Fatalf("failed to export key. 
err: %v\nresp: %#v", err, exportResp) - } - - t.Logf("exporting key: %v", exportResp) - - responseKeys, exist := exportResp.Data["keys"] - if !exist { - t.Fatal("expected response data to hold a 'keys' field") - } - - exportedKeyBytes := responseKeys.(map[string]string)["1"] - - if keyType != "ed25519" { - exportedKeyBlock, _ := pem.Decode([]byte(exportedKeyBytes)) - publicKeyBlock, _ := pem.Decode(publicKeyBytes) - - if !reflect.DeepEqual(publicKeyBlock.Bytes, exportedKeyBlock.Bytes) { - t.Fatalf("exported key bytes should have matched with imported key for key type: %v\nexported: %v\nimported: %v", keyType, exportedKeyBlock.Bytes, publicKeyBlock.Bytes) - } - } else { - exportedKey, err := base64.StdEncoding.DecodeString(exportedKeyBytes) - if err != nil { - t.Fatalf("error decoding exported key bytes (%v) to base64 for key type %v: %v", exportedKeyBytes, keyType, err) - } - - publicKeyBlock, _ := pem.Decode(publicKeyBytes) - publicKeyParsed, err := x509.ParsePKIXPublicKey(publicKeyBlock.Bytes) - if err != nil { - t.Fatalf("error decoding source key bytes (%v) from PKIX marshaling for key type %v: %v", publicKeyBlock.Bytes, keyType, err) - } - - if !reflect.DeepEqual([]byte(publicKeyParsed.(ed25519.PublicKey)), exportedKey) { - t.Fatalf("exported key bytes should have matched with imported key for key type: %v\nexported: %v\nimported: %v", keyType, exportedKey, publicKeyParsed) - } - } -} diff --git a/builtin/logical/transit/cmd/transit/main.go b/builtin/logical/transit/cmd/transit/main.go index 7e2ae8777bdc5..25d4675b90830 100644 --- a/builtin/logical/transit/cmd/transit/main.go +++ b/builtin/logical/transit/cmd/transit/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package main import ( @@ -20,11 +17,9 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + if err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: transit.Factory, - // set the TLSProviderFunc so that the plugin maintains backwards - // compatibility with Vault versions that don’t support plugin AutoMTLS - TLSProviderFunc: tlsProviderFunc, + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/transit/managed_key_util.go b/builtin/logical/transit/managed_key_util.go deleted file mode 100644 index c4dc1e9fe0d37..0000000000000 --- a/builtin/logical/transit/managed_key_util.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build !enterprise - -package transit - -import ( - "context" - "errors" -) - -var errEntOnly = errors.New("managed keys are supported within enterprise edition only") - -func GetManagedKeyUUID(ctx context.Context, b *backend, keyName string, keyId string) (uuid string, err error) { - return "", errEntOnly -} diff --git a/builtin/logical/transit/path_backup.go b/builtin/logical/transit/path_backup.go index 93833423387d7..ef13f0aab88cd 100644 --- a/builtin/logical/transit/path_backup.go +++ b/builtin/logical/transit/path_backup.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -13,13 +10,6 @@ import ( func (b *backend) pathBackup() *framework.Path { return &framework.Path{ Pattern: "backup/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "back-up", - OperationSuffix: "key", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_backup_test.go b/builtin/logical/transit/path_backup_test.go index 3627d6b702a77..d8a54d70fcc13 100644 --- a/builtin/logical/transit/path_backup_test.go +++ b/builtin/logical/transit/path_backup_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/path_byok.go b/builtin/logical/transit/path_byok.go deleted file mode 100644 index 40f7cac1a472e..0000000000000 --- a/builtin/logical/transit/path_byok.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package transit - -import ( - "context" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "errors" - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/keysutil" - "github.com/hashicorp/vault/sdk/logical" -) - -func (b *backend) pathBYOKExportKeys() *framework.Path { - return &framework.Path{ - Pattern: "byok-export/" + framework.GenericNameRegex("destination") + "/" + framework.GenericNameRegex("source") + framework.OptionalParamRegex("version"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "byok", - OperationSuffix: "key|key-version", - }, - - Fields: map[string]*framework.FieldSchema{ - "destination": { - Type: framework.TypeString, - Description: "Destination key to export to; usually the public wrapping key of another Transit instance.", - }, - "source": { - Type: framework.TypeString, - Description: "Source key to export; could be any present key within Transit.", - }, - "version": { - Type: framework.TypeString, - Description: "Optional version of the key to export, else all key versions are exported.", - }, - "hash": { - Type: framework.TypeString, - Description: "Hash function to use for inner OAEP encryption. 
Defaults to SHA256.", - Default: "SHA256", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathPolicyBYOKExportRead, - }, - - HelpSynopsis: pathBYOKExportHelpSyn, - HelpDescription: pathBYOKExportHelpDesc, - } -} - -func (b *backend) pathPolicyBYOKExportRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - dst := d.Get("destination").(string) - src := d.Get("source").(string) - version := d.Get("version").(string) - hash := d.Get("hash").(string) - - dstP, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ - Storage: req.Storage, - Name: dst, - }, b.GetRandomReader()) - if err != nil { - return nil, err - } - if dstP == nil { - return nil, fmt.Errorf("no such destination key to export to") - } - if !b.System().CachingDisabled() { - dstP.Lock(false) - } - defer dstP.Unlock() - - srcP, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ - Storage: req.Storage, - Name: src, - }, b.GetRandomReader()) - if err != nil { - return nil, err - } - if srcP == nil { - return nil, fmt.Errorf("no such source key for export") - } - if !b.System().CachingDisabled() { - srcP.Lock(false) - } - defer srcP.Unlock() - - if !srcP.Exportable { - return logical.ErrorResponse("key is not exportable"), nil - } - - retKeys := map[string]string{} - switch version { - case "": - for k, v := range srcP.Keys { - exportKey, err := getBYOKExportKey(dstP, srcP, &v, hash) - if err != nil { - return nil, err - } - retKeys[k] = exportKey - } - - default: - var versionValue int - if version == "latest" { - versionValue = srcP.LatestVersion - } else { - version = strings.TrimPrefix(version, "v") - versionValue, err = strconv.Atoi(version) - if err != nil { - return logical.ErrorResponse("invalid key version"), logical.ErrInvalidRequest - } - } - - if versionValue < srcP.MinDecryptionVersion { - return logical.ErrorResponse("version for export is below minimum decryption version"), logical.ErrInvalidRequest 
- } - key, ok := srcP.Keys[strconv.Itoa(versionValue)] - if !ok { - return logical.ErrorResponse("version does not exist or cannot be found"), logical.ErrInvalidRequest - } - - exportKey, err := getBYOKExportKey(dstP, srcP, &key, hash) - if err != nil { - return nil, err - } - - retKeys[strconv.Itoa(versionValue)] = exportKey - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "name": srcP.Name, - "type": srcP.Type.String(), - "keys": retKeys, - }, - } - - return resp, nil -} - -func getBYOKExportKey(dstP *keysutil.Policy, srcP *keysutil.Policy, key *keysutil.KeyEntry, hash string) (string, error) { - if dstP == nil || srcP == nil { - return "", errors.New("nil policy provided") - } - - var targetKey interface{} - switch srcP.Type { - case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305, keysutil.KeyType_HMAC: - targetKey = key.Key - case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: - targetKey = key.RSAKey - case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521: - var curve elliptic.Curve - switch srcP.Type { - case keysutil.KeyType_ECDSA_P384: - curve = elliptic.P384() - case keysutil.KeyType_ECDSA_P521: - curve = elliptic.P521() - default: - curve = elliptic.P256() - } - pubKey := ecdsa.PublicKey{ - Curve: curve, - X: key.EC_X, - Y: key.EC_Y, - } - targetKey = &ecdsa.PrivateKey{ - PublicKey: pubKey, - D: key.EC_D, - } - case keysutil.KeyType_ED25519: - targetKey = ed25519.PrivateKey(key.Key) - default: - return "", fmt.Errorf("unable to export to unknown key type: %v", srcP.Type) - } - - hasher, err := parseHashFn(hash) - if err != nil { - return "", err - } - - return dstP.WrapKey(0, targetKey, srcP.Type, hasher) -} - -const pathBYOKExportHelpSyn = `Securely export named encryption or signing key` - -const pathBYOKExportHelpDesc = ` -This path is used to export the named keys that are configured as -exportable. 
- -Unlike the regular /export/:name[/:version] paths, this path uses -the same encryption specification /import, allowing secure migration -of keys between clusters to enable workloads to communicate between -them. - -Presently this only works for RSA destination keys. -` diff --git a/builtin/logical/transit/path_byok_test.go b/builtin/logical/transit/path_byok_test.go deleted file mode 100644 index a05a719ea4f21..0000000000000 --- a/builtin/logical/transit/path_byok_test.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package transit - -import ( - "context" - "testing" - - "github.com/hashicorp/vault/sdk/logical" -) - -func TestTransit_BYOKExportImport(t *testing.T) { - // Test encryption/decryption after a restore for supported keys - testBYOKExportImport(t, "aes128-gcm96", "encrypt-decrypt") - testBYOKExportImport(t, "aes256-gcm96", "encrypt-decrypt") - testBYOKExportImport(t, "chacha20-poly1305", "encrypt-decrypt") - testBYOKExportImport(t, "rsa-2048", "encrypt-decrypt") - testBYOKExportImport(t, "rsa-3072", "encrypt-decrypt") - testBYOKExportImport(t, "rsa-4096", "encrypt-decrypt") - - // Test signing/verification after a restore for supported keys - testBYOKExportImport(t, "ecdsa-p256", "sign-verify") - testBYOKExportImport(t, "ecdsa-p384", "sign-verify") - testBYOKExportImport(t, "ecdsa-p521", "sign-verify") - testBYOKExportImport(t, "ed25519", "sign-verify") - testBYOKExportImport(t, "rsa-2048", "sign-verify") - testBYOKExportImport(t, "rsa-3072", "sign-verify") - testBYOKExportImport(t, "rsa-4096", "sign-verify") - - // Test HMAC sign/verify after a restore for supported keys. 
- testBYOKExportImport(t, "hmac", "hmac-verify") -} - -func testBYOKExportImport(t *testing.T, keyType, feature string) { - var resp *logical.Response - var err error - - b, s := createBackendWithStorage(t) - - // Create a key - keyReq := &logical.Request{ - Path: "keys/test-source", - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "type": keyType, - "exportable": true, - }, - } - if keyType == "hmac" { - keyReq.Data["key_size"] = 32 - } - resp, err = b.HandleRequest(context.Background(), keyReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - - // Read the wrapping key. - wrapKeyReq := &logical.Request{ - Path: "wrapping_key", - Operation: logical.ReadOperation, - Storage: s, - } - resp, err = b.HandleRequest(context.Background(), wrapKeyReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - - // Import the wrapping key. - wrapKeyImportReq := &logical.Request{ - Path: "keys/wrapper/import", - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "public_key": resp.Data["public_key"], - "type": "rsa-4096", - }, - } - resp, err = b.HandleRequest(context.Background(), wrapKeyImportReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - - // Export the key - backupReq := &logical.Request{ - Path: "byok-export/wrapper/test-source", - Operation: logical.ReadOperation, - Storage: s, - } - resp, err = b.HandleRequest(context.Background(), backupReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - keys := resp.Data["keys"].(map[string]string) - - // Import the key to a new name. 
- restoreReq := &logical.Request{ - Path: "keys/test/import", - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "ciphertext": keys["1"], - "type": keyType, - }, - } - resp, err = b.HandleRequest(context.Background(), restoreReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - - plaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" - // Perform encryption, signing or hmac-ing based on the set 'feature' - var encryptReq, signReq, hmacReq *logical.Request - var ciphertext, signature, hmac string - switch feature { - case "encrypt-decrypt": - encryptReq = &logical.Request{ - Path: "encrypt/test-source", - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "plaintext": plaintextB64, - }, - } - resp, err = b.HandleRequest(context.Background(), encryptReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - ciphertext = resp.Data["ciphertext"].(string) - - case "sign-verify": - signReq = &logical.Request{ - Path: "sign/test-source", - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "input": plaintextB64, - }, - } - resp, err = b.HandleRequest(context.Background(), signReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - signature = resp.Data["signature"].(string) - - case "hmac-verify": - hmacReq = &logical.Request{ - Path: "hmac/test-source", - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "input": plaintextB64, - }, - } - resp, err = b.HandleRequest(context.Background(), hmacReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - hmac = resp.Data["hmac"].(string) - } - - // validationFunc verifies the ciphertext, signature or hmac based on the - // set 'feature' - validationFunc := func(keyName string) 
{ - var decryptReq *logical.Request - var verifyReq *logical.Request - switch feature { - case "encrypt-decrypt": - decryptReq = &logical.Request{ - Path: "decrypt/" + keyName, - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "ciphertext": ciphertext, - }, - } - resp, err = b.HandleRequest(context.Background(), decryptReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - - if resp.Data["plaintext"].(string) != plaintextB64 { - t.Fatalf("bad: plaintext; expected: %q, actual: %q", plaintextB64, resp.Data["plaintext"].(string)) - } - case "sign-verify": - verifyReq = &logical.Request{ - Path: "verify/" + keyName, - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "signature": signature, - "input": plaintextB64, - }, - } - resp, err = b.HandleRequest(context.Background(), verifyReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - if resp.Data["valid"].(bool) != true { - t.Fatalf("bad: signature verification failed for key type %q", keyType) - } - - case "hmac-verify": - verifyReq = &logical.Request{ - Path: "verify/" + keyName, - Operation: logical.UpdateOperation, - Storage: s, - Data: map[string]interface{}{ - "hmac": hmac, - "input": plaintextB64, - }, - } - resp, err = b.HandleRequest(context.Background(), verifyReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("resp: %#v\nerr: %v", resp, err) - } - if resp.Data["valid"].(bool) != true { - t.Fatalf("bad: HMAC verification failed for key type %q", keyType) - } - } - } - - // Ensure that the restored key is functional - validationFunc("test") - - // Ensure the original key is functional - validationFunc("test-source") -} diff --git a/builtin/logical/transit/path_cache_config.go b/builtin/logical/transit/path_cache_config.go index f8f0cea551eed..e7692997668df 100644 --- a/builtin/logical/transit/path_cache_config.go +++ 
b/builtin/logical/transit/path_cache_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -14,11 +11,6 @@ import ( func (b *backend) pathCacheConfig() *framework.Path { return &framework.Path{ Pattern: "cache-config", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - }, - Fields: map[string]*framework.FieldSchema{ "size": { Type: framework.TypeInt, @@ -32,27 +24,16 @@ func (b *backend) pathCacheConfig() *framework.Path { logical.ReadOperation: &framework.PathOperation{ Callback: b.pathCacheConfigRead, Summary: "Returns the size of the active cache", - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "cache-configuration", - }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCacheConfigWrite, Summary: "Configures a new cache of the specified size", - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "cache", - }, }, logical.CreateOperation: &framework.PathOperation{ Callback: b.pathCacheConfigWrite, Summary: "Configures a new cache of the specified size", - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", - OperationSuffix: "cache", - }, }, }, @@ -84,11 +65,7 @@ func (b *backend) pathCacheConfigWrite(ctx context.Context, req *logical.Request return nil, err } - return &logical.Response{ - Data: map[string]interface{}{ - "size": cacheSize, - }, - }, nil + return nil, nil } type configCache struct { diff --git a/builtin/logical/transit/path_cache_config_test.go b/builtin/logical/transit/path_cache_config_test.go index f5c8316d87892..d8e0a7b56d87a 100644 --- a/builtin/logical/transit/path_cache_config_test.go +++ b/builtin/logical/transit/path_cache_config_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/path_config.go b/builtin/logical/transit/path_config.go new file mode 100644 index 0000000000000..a3d72731b773e --- /dev/null +++ b/builtin/logical/transit/path_config.go @@ -0,0 +1,237 @@ +package transit + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathConfig() *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameRegex("name") + "/config", + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key", + }, + + "min_decryption_version": { + Type: framework.TypeInt, + Description: `If set, the minimum version of the key allowed +to be decrypted. For signing keys, the minimum +version allowed to be used for verification.`, + }, + + "min_encryption_version": { + Type: framework.TypeInt, + Description: `If set, the minimum version of the key allowed +to be used for encryption; or for signing keys, +to be used for signing. If set to zero, only +the latest version of the key is allowed.`, + }, + + "deletion_allowed": { + Type: framework.TypeBool, + Description: "Whether to allow deletion of the key", + }, + + "exportable": { + Type: framework.TypeBool, + Description: `Enables export of the key. Once set, this cannot be disabled.`, + }, + + "allow_plaintext_backup": { + Type: framework.TypeBool, + Description: `Enables taking a backup of the named key in plaintext format. Once set, this cannot be disabled.`, + }, + + "auto_rotate_period": { + Type: framework.TypeDurationSecond, + Description: `Amount of time the key should live before +being automatically rotated. 
A value of 0 +disables automatic rotation for the key.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConfigWrite, + }, + + HelpSynopsis: pathConfigHelpSyn, + HelpDescription: pathConfigHelpDesc, + } +} + +func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (resp *logical.Response, retErr error) { + name := d.Get("name").(string) + + // Check if the policy already exists before we lock everything + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse( + fmt.Sprintf("no existing key named %s could be found", name)), + logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(true) + } + defer p.Unlock() + + originalMinDecryptionVersion := p.MinDecryptionVersion + originalMinEncryptionVersion := p.MinEncryptionVersion + originalDeletionAllowed := p.DeletionAllowed + originalExportable := p.Exportable + originalAllowPlaintextBackup := p.AllowPlaintextBackup + + defer func() { + if retErr != nil || (resp != nil && resp.IsError()) { + p.MinDecryptionVersion = originalMinDecryptionVersion + p.MinEncryptionVersion = originalMinEncryptionVersion + p.DeletionAllowed = originalDeletionAllowed + p.Exportable = originalExportable + p.AllowPlaintextBackup = originalAllowPlaintextBackup + } + }() + + resp = &logical.Response{} + + persistNeeded := false + + minDecryptionVersionRaw, ok := d.GetOk("min_decryption_version") + if ok { + minDecryptionVersion := minDecryptionVersionRaw.(int) + + if minDecryptionVersion < 0 { + return logical.ErrorResponse("min decryption version cannot be negative"), nil + } + + if minDecryptionVersion == 0 { + minDecryptionVersion = 1 + resp.AddWarning("since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1") + } + + if minDecryptionVersion != 
p.MinDecryptionVersion { + if minDecryptionVersion > p.LatestVersion { + return logical.ErrorResponse( + fmt.Sprintf("cannot set min decryption version of %d, latest key version is %d", minDecryptionVersion, p.LatestVersion)), nil + } + p.MinDecryptionVersion = minDecryptionVersion + persistNeeded = true + } + } + + minEncryptionVersionRaw, ok := d.GetOk("min_encryption_version") + if ok { + minEncryptionVersion := minEncryptionVersionRaw.(int) + + if minEncryptionVersion < 0 { + return logical.ErrorResponse("min encryption version cannot be negative"), nil + } + + if minEncryptionVersion != p.MinEncryptionVersion { + if minEncryptionVersion > p.LatestVersion { + return logical.ErrorResponse( + fmt.Sprintf("cannot set min encryption version of %d, latest key version is %d", minEncryptionVersion, p.LatestVersion)), nil + } + p.MinEncryptionVersion = minEncryptionVersion + persistNeeded = true + } + } + + // Check here to get the final picture after the logic on each + // individually. MinDecryptionVersion will always be 1 or above. 
+ if p.MinEncryptionVersion > 0 && + p.MinEncryptionVersion < p.MinDecryptionVersion { + return logical.ErrorResponse( + fmt.Sprintf("cannot set min encryption/decryption values; min encryption version of %d must be greater than or equal to min decryption version of %d", p.MinEncryptionVersion, p.MinDecryptionVersion)), nil + } + + allowDeletionInt, ok := d.GetOk("deletion_allowed") + if ok { + allowDeletion := allowDeletionInt.(bool) + if allowDeletion != p.DeletionAllowed { + p.DeletionAllowed = allowDeletion + persistNeeded = true + } + } + + // Add this as a guard here before persisting since we now require the min + // decryption version to start at 1; even if it's not explicitly set here, + // force the upgrade + if p.MinDecryptionVersion == 0 { + p.MinDecryptionVersion = 1 + persistNeeded = true + } + + exportableRaw, ok := d.GetOk("exportable") + if ok { + exportable := exportableRaw.(bool) + // Don't unset the already set value + if exportable && !p.Exportable { + p.Exportable = exportable + persistNeeded = true + } + } + + allowPlaintextBackupRaw, ok := d.GetOk("allow_plaintext_backup") + if ok { + allowPlaintextBackup := allowPlaintextBackupRaw.(bool) + // Don't unset the already set value + if allowPlaintextBackup && !p.AllowPlaintextBackup { + p.AllowPlaintextBackup = allowPlaintextBackup + persistNeeded = true + } + } + + autoRotatePeriodRaw, ok, err := d.GetOkErr("auto_rotate_period") + if err != nil { + return nil, err + } + if ok { + autoRotatePeriod := time.Second * time.Duration(autoRotatePeriodRaw.(int)) + // Provided value must be 0 to disable or at least an hour + if autoRotatePeriod != 0 && autoRotatePeriod < time.Hour { + return logical.ErrorResponse("auto rotate period must be 0 to disable or at least an hour"), nil + } + + if autoRotatePeriod != p.AutoRotatePeriod { + p.AutoRotatePeriod = autoRotatePeriod + persistNeeded = true + } + } + + if !persistNeeded { + return nil, nil + } + + switch { + case p.MinAvailableVersion > 
p.MinEncryptionVersion: + return logical.ErrorResponse("min encryption version should not be less than min available version"), nil + case p.MinAvailableVersion > p.MinDecryptionVersion: + return logical.ErrorResponse("min decryption version should not be less then min available version"), nil + } + + if len(resp.Warnings) == 0 { + return nil, p.Persist(ctx, req.Storage) + } + + return resp, p.Persist(ctx, req.Storage) +} + +const pathConfigHelpSyn = `Configure a named encryption key` + +const pathConfigHelpDesc = ` +This path is used to configure the named key. Currently, this +supports adjusting the minimum version of the key allowed to +be used for decryption via the min_decryption_version parameter. +` diff --git a/builtin/logical/transit/path_config_keys.go b/builtin/logical/transit/path_config_keys.go deleted file mode 100644 index bbf2cc4c07a85..0000000000000 --- a/builtin/logical/transit/path_config_keys.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package transit - -import ( - "context" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -const keysConfigPath = "config/keys" - -type keysConfig struct { - DisableUpsert bool `json:"disable_upsert"` -} - -var defaultKeysConfig = keysConfig{ - DisableUpsert: false, -} - -func (b *backend) pathConfigKeys() *framework.Path { - return &framework.Path{ - Pattern: "config/keys", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - }, - - Fields: map[string]*framework.FieldSchema{ - "disable_upsert": { - Type: framework.TypeBool, - Description: `Whether to allow automatic upserting (creation) of -keys on the encrypt endpoint.`, - }, - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigKeysWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure", 
- OperationSuffix: "keys", - }, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigKeysRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "keys-configuration", - }, - }, - }, - - HelpSynopsis: pathConfigKeysHelpSyn, - HelpDescription: pathConfigKeysHelpDesc, - } -} - -func (b *backend) readConfigKeys(ctx context.Context, req *logical.Request) (*keysConfig, error) { - entry, err := req.Storage.Get(ctx, keysConfigPath) - if err != nil { - return nil, fmt.Errorf("failed to fetch keys configuration: %w", err) - } - - var cfg keysConfig - if entry == nil { - cfg = defaultKeysConfig - return &cfg, nil - } - - if err := entry.DecodeJSON(&cfg); err != nil { - return nil, fmt.Errorf("failed to decode keys configuration: %w", err) - } - - return &cfg, nil -} - -func (b *backend) writeConfigKeys(ctx context.Context, req *logical.Request, cfg *keysConfig) error { - entry, err := logical.StorageEntryJSON(keysConfigPath, cfg) - if err != nil { - return fmt.Errorf("failed to marshal keys configuration: %w", err) - } - - return req.Storage.Put(ctx, entry) -} - -func respondConfigKeys(cfg *keysConfig) *logical.Response { - return &logical.Response{ - Data: map[string]interface{}{ - "disable_upsert": cfg.DisableUpsert, - }, - } -} - -func (b *backend) pathConfigKeysWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - upsert := d.Get("disable_upsert").(bool) - - cfg, err := b.readConfigKeys(ctx, req) - if err != nil { - return nil, err - } - - modified := false - - if cfg.DisableUpsert != upsert { - cfg.DisableUpsert = upsert - modified = true - } - - if modified { - if err := b.writeConfigKeys(ctx, req, cfg); err != nil { - return nil, err - } - } - - return respondConfigKeys(cfg), nil -} - -func (b *backend) pathConfigKeysRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - cfg, err := b.readConfigKeys(ctx, req) - if err != nil { - 
return nil, err - } - - return respondConfigKeys(cfg), nil -} - -const pathConfigKeysHelpSyn = `Configuration common across all keys` - -const pathConfigKeysHelpDesc = ` -This path is used to configure common functionality across all keys. Currently, -this supports limiting the ability to automatically create new keys when an -unknown key is used for encryption (upsert). -` diff --git a/builtin/logical/transit/path_config_keys_test.go b/builtin/logical/transit/path_config_keys_test.go deleted file mode 100644 index dde7c58a0a946..0000000000000 --- a/builtin/logical/transit/path_config_keys_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package transit - -import ( - "context" - "testing" - - "github.com/hashicorp/vault/sdk/logical" -) - -func TestTransit_ConfigKeys(t *testing.T) { - b, s := createBackendWithSysView(t) - - doReq := func(req *logical.Request) *logical.Response { - resp, err := b.HandleRequest(context.Background(), req) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req) - } - return resp - } - doErrReq := func(req *logical.Request) { - resp, err := b.HandleRequest(context.Background(), req) - if err == nil { - if resp == nil || !resp.IsError() { - t.Fatalf("expected error; req:\n%#v\n", *req) - } - } - } - - // First read the global config - req := &logical.Request{ - Storage: s, - Operation: logical.ReadOperation, - Path: "config/keys", - } - resp := doReq(req) - if resp.Data["disable_upsert"].(bool) != false { - t.Fatalf("expected disable_upsert to be false; got: %v", resp) - } - - // Ensure we can upsert. - req.Operation = logical.CreateOperation - req.Path = "encrypt/upsert-1" - req.Data = map[string]interface{}{ - "plaintext": "aGVsbG8K", - } - doReq(req) - - // Disable upserting. 
- req.Operation = logical.UpdateOperation - req.Path = "config/keys" - req.Data = map[string]interface{}{ - "disable_upsert": true, - } - doReq(req) - - // Attempt upserting again, it should fail. - req.Operation = logical.CreateOperation - req.Path = "encrypt/upsert-2" - req.Data = map[string]interface{}{ - "plaintext": "aGVsbG8K", - } - doErrReq(req) - - // Redoing this with the first key should succeed. - req.Path = "encrypt/upsert-1" - doReq(req) -} diff --git a/builtin/logical/transit/path_config_test.go b/builtin/logical/transit/path_config_test.go new file mode 100644 index 0000000000000..f6dee45090dc4 --- /dev/null +++ b/builtin/logical/transit/path_config_test.go @@ -0,0 +1,405 @@ +package transit + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "strconv" + "strings" + "testing" + "time" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestTransit_ConfigSettings(t *testing.T) { + b, storage := createBackendWithSysView(t) + + doReq := func(req *logical.Request) *logical.Response { + resp, err := b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req) + } + return resp + } + doErrReq := func(req *logical.Request) { + resp, err := b.HandleRequest(context.Background(), req) + if err == nil { + if resp == nil || !resp.IsError() { + t.Fatalf("expected error; req:\n%#v\n", *req) + } + } + } + + // First create a key + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/aes256", + Data: map[string]interface{}{ + "derived": true, + }, + } + doReq(req) + + req.Path = "keys/aes128" + req.Data["type"] = "aes128-gcm96" + doReq(req) + + req.Path = "keys/ed" + req.Data["type"] = "ed25519" + doReq(req) + + delete(req.Data, "derived") + + req.Path = 
"keys/p256" + req.Data["type"] = "ecdsa-p256" + doReq(req) + + req.Path = "keys/p384" + req.Data["type"] = "ecdsa-p384" + doReq(req) + + req.Path = "keys/p521" + req.Data["type"] = "ecdsa-p521" + doReq(req) + + delete(req.Data, "type") + + req.Path = "keys/aes128/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/aes256/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/ed/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/p256/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/p384/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/p521/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/aes256/config" + // Too high + req.Data["min_decryption_version"] = 7 + doErrReq(req) + // Too low + req.Data["min_decryption_version"] = -1 + doErrReq(req) + + delete(req.Data, "min_decryption_version") + // Too high + req.Data["min_encryption_version"] = 7 + doErrReq(req) + // Too low + req.Data["min_encryption_version"] = 7 + doErrReq(req) + + // Not allowed, cannot decrypt + req.Data["min_decryption_version"] = 3 + req.Data["min_encryption_version"] = 2 + doErrReq(req) + + // Allowed + req.Data["min_decryption_version"] = 2 + req.Data["min_encryption_version"] = 3 + doReq(req) + req.Path = "keys/aes128/config" + doReq(req) + req.Path = "keys/ed/config" + doReq(req) + req.Path = "keys/p256/config" + doReq(req) + req.Path = "keys/p384/config" + doReq(req) + + req.Path = "keys/p521/config" + doReq(req) + + req.Data = map[string]interface{}{ + "plaintext": "abcd", + "input": "abcd", + "context": "abcd", + } + + maxKeyVersion := 5 + key := "aes256" + + testHMAC := func(ver int, valid bool) { + req.Path = "hmac/" + key + delete(req.Data, "hmac") + if ver == maxKeyVersion { + delete(req.Data, "key_version") + } else { + req.Data["key_version"] = ver + } + + if !valid { + doErrReq(req) + return + } + + resp 
:= doReq(req) + ct := resp.Data["hmac"].(string) + if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { + t.Fatal("wrong hmac version") + } + + req.Path = "verify/" + key + delete(req.Data, "key_version") + req.Data["hmac"] = resp.Data["hmac"] + doReq(req) + } + + testEncryptDecrypt := func(ver int, valid bool) { + req.Path = "encrypt/" + key + delete(req.Data, "ciphertext") + if ver == maxKeyVersion { + delete(req.Data, "key_version") + } else { + req.Data["key_version"] = ver + } + + if !valid { + doErrReq(req) + return + } + + resp := doReq(req) + ct := resp.Data["ciphertext"].(string) + if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { + t.Fatal("wrong encryption version") + } + + req.Path = "decrypt/" + key + delete(req.Data, "key_version") + req.Data["ciphertext"] = resp.Data["ciphertext"] + doReq(req) + } + testEncryptDecrypt(5, true) + testEncryptDecrypt(4, true) + testEncryptDecrypt(3, true) + testEncryptDecrypt(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + key = "aes128" + testEncryptDecrypt(5, true) + testEncryptDecrypt(4, true) + testEncryptDecrypt(3, true) + testEncryptDecrypt(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + delete(req.Data, "plaintext") + req.Data["input"] = "abcd" + key = "ed" + testSignVerify := func(ver int, valid bool) { + req.Path = "sign/" + key + delete(req.Data, "signature") + if ver == maxKeyVersion { + delete(req.Data, "key_version") + } else { + req.Data["key_version"] = ver + } + + if !valid { + doErrReq(req) + return + } + + resp := doReq(req) + ct := resp.Data["signature"].(string) + if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { + t.Fatal("wrong signature version") + } + + req.Path = "verify/" + key + delete(req.Data, "key_version") + req.Data["signature"] = resp.Data["signature"] + doReq(req) + } + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + 
testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + delete(req.Data, "context") + key = "p256" + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + key = "p384" + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + key = "p521" + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) +} + +func TestTransit_UpdateKeyConfigWithAutorotation(t *testing.T) { + tests := map[string]struct { + initialAutoRotatePeriod interface{} + newAutoRotatePeriod interface{} + shouldError bool + expectedValue time.Duration + }{ + "default (no value)": { + initialAutoRotatePeriod: "5h", + shouldError: false, + expectedValue: 5 * time.Hour, + }, + "0 (int)": { + initialAutoRotatePeriod: "5h", + newAutoRotatePeriod: 0, + shouldError: false, + expectedValue: 0, + }, + "0 (string)": { + initialAutoRotatePeriod: "5h", + newAutoRotatePeriod: 0, + shouldError: false, + expectedValue: 0, + }, + "5 seconds": { + newAutoRotatePeriod: "5s", + shouldError: true, + }, + "5 hours": { + newAutoRotatePeriod: "5h", + shouldError: false, + expectedValue: 5 * time.Hour, + }, + "negative value": { + newAutoRotatePeriod: "-1800s", + shouldError: true, + }, + "invalid string": { + newAutoRotatePeriod: "this shouldn't work", + shouldError: true, + }, + } + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, 
cores[0].Core) + client := cores[0].Client + err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + if err != nil { + t.Fatal(err) + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + keyNameBytes, err := uuid.GenerateRandomBytes(16) + if err != nil { + t.Fatal(err) + } + keyName := hex.EncodeToString(keyNameBytes) + + _, err = client.Logical().Write(fmt.Sprintf("transit/keys/%s", keyName), map[string]interface{}{ + "auto_rotate_period": test.initialAutoRotatePeriod, + }) + if err != nil { + t.Fatal(err) + } + resp, err := client.Logical().Write(fmt.Sprintf("transit/keys/%s/config", keyName), map[string]interface{}{ + "auto_rotate_period": test.newAutoRotatePeriod, + }) + switch { + case test.shouldError && err == nil: + t.Fatal("expected non-nil error") + case !test.shouldError && err != nil: + t.Fatal(err) + } + + if !test.shouldError { + resp, err = client.Logical().Read(fmt.Sprintf("transit/keys/%s", keyName)) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + gotRaw, ok := resp.Data["auto_rotate_period"].(json.Number) + if !ok { + t.Fatal("returned value is of unexpected type") + } + got, err := gotRaw.Int64() + if err != nil { + t.Fatal(err) + } + want := int64(test.expectedValue.Seconds()) + if got != want { + t.Fatalf("incorrect auto_rotate_period returned, got: %d, want: %d", got, want) + } + } + }) + } +} diff --git a/builtin/logical/transit/path_datakey.go b/builtin/logical/transit/path_datakey.go index 1c607d0e2f88e..42da161916392 100644 --- a/builtin/logical/transit/path_datakey.go +++ b/builtin/logical/transit/path_datakey.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -19,13 +16,6 @@ import ( func (b *backend) pathDatakey() *framework.Path { return &framework.Path{ Pattern: "datakey/" + framework.GenericNameRegex("plaintext") + "/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "generate", - OperationSuffix: "data-key", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_decrypt.go b/builtin/logical/transit/path_decrypt.go index 116732b7f4573..f95e33621e798 100644 --- a/builtin/logical/transit/path_decrypt.go +++ b/builtin/logical/transit/path_decrypt.go @@ -1,12 +1,8 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( "context" "encoding/base64" - "errors" "fmt" "github.com/hashicorp/vault/sdk/framework" @@ -23,21 +19,11 @@ type DecryptBatchResponseItem struct { // Error, if set represents a failure encountered while encrypting a // corresponding batch request item Error string `json:"error,omitempty" structs:"error" mapstructure:"error"` - - // Reference is an arbitrary caller supplied string value that will be placed on the - // batch response to ease correlation between inputs and outputs - Reference string `json:"reference" structs:"reference" mapstructure:"reference"` } func (b *backend) pathDecrypt() *framework.Path { return &framework.Path{ Pattern: "decrypt/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "decrypt", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -74,17 +60,6 @@ Providing the parameter returns the given response code integer instead of a 400 HTTP 400 is still returned.`, }, - "associated_data": { - Type: framework.TypeString, - Description: ` -When using an AEAD cipher mode, 
such as AES-GCM, this parameter allows -passing associated data (AD/AAD) into the encryption function; this data -must be passed on subsequent decryption requests but can be transited in -plaintext. On successful decryption, both the ciphertext and the associated -data are attested not to have been tampered with. - `, - }, - "batch_input": { Type: framework.TypeSlice, Description: ` @@ -125,10 +100,9 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d batchInputItems = make([]BatchRequestItem, 1) batchInputItems[0] = BatchRequestItem{ - Ciphertext: ciphertext, - Context: d.Get("context").(string), - Nonce: d.Get("nonce").(string), - AssociatedData: d.Get("associated_data").(string), + Ciphertext: ciphertext, + Context: d.Get("context").(string), + Nonce: d.Get("nonce").(string), } } @@ -191,33 +165,7 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d continue } - var factory interface{} - if item.AssociatedData != "" { - if !p.Type.AssociatedDataSupported() { - batchResponseItems[i].Error = fmt.Sprintf("'[%d].associated_data' provided for non-AEAD cipher suite %v", i, p.Type.String()) - continue - } - - factory = AssocDataFactory{item.AssociatedData} - } - - var managedKeyFactory ManagedKeyFactory - if p.Type == keysutil.KeyType_MANAGED_KEY { - managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) - if !ok { - batchResponseItems[i].Error = errors.New("unsupported system view").Error() - } - - managedKeyFactory = ManagedKeyFactory{ - managedKeyParams: keysutil.ManagedKeyParameters{ - ManagedKeySystemView: managedKeySystemView, - BackendUUID: b.backendUUID, - Context: ctx, - }, - } - } - - plaintext, err := p.DecryptWithFactory(item.DecodedContext, item.DecodedNonce, item.Ciphertext, factory, managedKeyFactory) + plaintext, err := p.Decrypt(item.DecodedContext, item.DecodedNonce, item.Ciphertext) if err != nil { switch err.(type) { case errutil.InternalError: @@ -234,10 +182,6 @@ func (b 
*backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d resp := &logical.Response{} if batchInputRaw != nil { - // Copy the references - for i := range batchInputItems { - batchResponseItems[i].Reference = batchInputItems[i].Reference - } resp.Data = map[string]interface{}{ "batch_results": batchResponseItems, } diff --git a/builtin/logical/transit/path_decrypt_bench_test.go b/builtin/logical/transit/path_decrypt_bench_test.go index c4dc72837ab67..67d4bc3b5d5a2 100644 --- a/builtin/logical/transit/path_decrypt_bench_test.go +++ b/builtin/logical/transit/path_decrypt_bench_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/path_decrypt_test.go b/builtin/logical/transit/path_decrypt_test.go index e69402c7d514f..9a6b21aa12dde 100644 --- a/builtin/logical/transit/path_decrypt_test.go +++ b/builtin/logical/transit/path_decrypt_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -22,9 +19,9 @@ func TestTransit_BatchDecryption(t *testing.T) { b, s := createBackendWithStorage(t) batchEncryptionInput := []interface{}{ - map[string]interface{}{"plaintext": "", "reference": "foo"}, // empty string - map[string]interface{}{"plaintext": "Cg==", "reference": "bar"}, // newline - map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "baz"}, + map[string]interface{}{"plaintext": ""}, // empty string + map[string]interface{}{"plaintext": "Cg=="}, // newline + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, } batchEncryptionData := map[string]interface{}{ "batch_input": batchEncryptionInput, @@ -44,7 +41,7 @@ func TestTransit_BatchDecryption(t *testing.T) { batchResponseItems := resp.Data["batch_results"].([]EncryptBatchResponseItem) batchDecryptionInput := make([]interface{}, len(batchResponseItems)) for i, item := range batchResponseItems { - batchDecryptionInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext, "reference": item.Reference} + batchDecryptionInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext} } batchDecryptionData := map[string]interface{}{ "batch_input": batchDecryptionInput, @@ -62,8 +59,7 @@ func TestTransit_BatchDecryption(t *testing.T) { } batchDecryptionResponseItems := resp.Data["batch_results"].([]DecryptBatchResponseItem) - // This seems fragile - expectedResult := "[{\"plaintext\":\"\",\"reference\":\"foo\"},{\"plaintext\":\"Cg==\",\"reference\":\"bar\"},{\"plaintext\":\"dGhlIHF1aWNrIGJyb3duIGZveA==\",\"reference\":\"baz\"}]" + expectedResult := "[{\"plaintext\":\"\"},{\"plaintext\":\"Cg==\"},{\"plaintext\":\"dGhlIHF1aWNrIGJyb3duIGZveA==\"}]" jsonResponse, err := json.Marshal(batchDecryptionResponseItems) if err != nil || err == nil && string(jsonResponse) != expectedResult { diff --git a/builtin/logical/transit/path_encrypt.go b/builtin/logical/transit/path_encrypt.go index 
78b43a64c91ca..3d8a103f5f49d 100644 --- a/builtin/logical/transit/path_encrypt.go +++ b/builtin/logical/transit/path_encrypt.go @@ -1,13 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( "context" "encoding/base64" "encoding/json" - "errors" "fmt" "net/http" "reflect" @@ -43,13 +39,6 @@ type BatchRequestItem struct { // DecodedNonce is the base64 decoded version of Nonce DecodedNonce []byte - - // Associated Data for AEAD ciphers - AssociatedData string `json:"associated_data" struct:"associated_data" mapstructure:"associated_data"` - - // Reference is an arbitrary caller supplied string value that will be placed on the - // batch response to ease correlation between inputs and outputs - Reference string `json:"reference" structs:"reference" mapstructure:"reference"` } // EncryptBatchResponseItem represents a response item for batch processing @@ -64,37 +53,11 @@ type EncryptBatchResponseItem struct { // Error, if set represents a failure encountered while encrypting a // corresponding batch request item Error string `json:"error,omitempty" structs:"error" mapstructure:"error"` - - // Reference is an arbitrary caller supplied string value that will be placed on the - // batch response to ease correlation between inputs and outputs - Reference string `json:"reference"` -} - -type AssocDataFactory struct { - Encoded string -} - -func (a AssocDataFactory) GetAssociatedData() ([]byte, error) { - return base64.StdEncoding.DecodeString(a.Encoded) -} - -type ManagedKeyFactory struct { - managedKeyParams keysutil.ManagedKeyParameters -} - -func (m ManagedKeyFactory) GetManagedKeyParameters() keysutil.ManagedKeyParameters { - return m.managedKeyParams } func (b *backend) pathEncrypt() *framework.Path { return &framework.Path{ Pattern: "encrypt/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "encrypt", - }, - Fields: 
map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -160,17 +123,6 @@ Providing the parameter returns the given response code integer instead of a 400 HTTP 400 is still returned.`, }, - "associated_data": { - Type: framework.TypeString, - Description: ` -When using an AEAD cipher mode, such as AES-GCM, this parameter allows -passing associated data (AD/AAD) into the encryption function; this data -must be passed on subsequent decryption requests but can be transited in -plaintext. On successful decryption, both the ciphertext and the associated -data are attested not to have been tampered with. - `, - }, - "batch_input": { Type: framework.TypeSlice, Description: ` @@ -286,23 +238,6 @@ func decodeBatchRequestItems(src interface{}, requirePlaintext bool, requireCiph errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].key_version' expected type 'int', got unconvertible type '%T'", i, item["key_version"])) } } - - if v, has := item["associated_data"]; has { - if !reflect.ValueOf(v).IsValid() { - } else if casted, ok := v.(string); ok { - (*dst)[i].AssociatedData = casted - } else { - errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].associated_data' expected type 'string', got unconvertible type '%T'", i, item["associated_data"])) - } - } - if v, has := item["reference"]; has { - if !reflect.ValueOf(v).IsValid() { - } else if casted, ok := v.(string); ok { - (*dst)[i].Reference = casted - } else { - errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].reference' expected type 'string', got unconvertible type '%T'", i, item["reference"])) - } - } } if len(errs.Errors) > 0 { @@ -353,11 +288,10 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d batchInputItems = make([]BatchRequestItem, 1) batchInputItems[0] = BatchRequestItem{ - Plaintext: valueRaw.(string), - Context: d.Get("context").(string), - Nonce: d.Get("nonce").(string), - KeyVersion: d.Get("key_version").(int), - AssociatedData: 
d.Get("associated_data").(string), + Plaintext: valueRaw.(string), + Context: d.Get("context").(string), + Nonce: d.Get("nonce").(string), + KeyVersion: d.Get("key_version").(int), } } @@ -415,13 +349,8 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d return logical.ErrorResponse("convergent encryption requires derivation to be enabled, so context is required"), nil } - cfg, err := b.readConfigKeys(ctx, req) - if err != nil { - return nil, err - } - polReq = keysutil.PolicyRequest{ - Upsert: !cfg.DisableUpsert, + Upsert: true, Storage: req.Storage, Name: name, Derived: contextSet, @@ -438,8 +367,6 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d polReq.KeyType = keysutil.KeyType_ChaCha20_Poly1305 case "ecdsa-p256", "ecdsa-p384", "ecdsa-p521": return logical.ErrorResponse(fmt.Sprintf("key type %v not supported for this operation", keyType)), logical.ErrInvalidRequest - case "managed_key": - polReq.KeyType = keysutil.KeyType_MANAGED_KEY default: return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest } @@ -475,33 +402,7 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d warnAboutNonceUsage = true } - var factory interface{} - if item.AssociatedData != "" { - if !p.Type.AssociatedDataSupported() { - batchResponseItems[i].Error = fmt.Sprintf("'[%d].associated_data' provided for non-AEAD cipher suite %v", i, p.Type.String()) - continue - } - - factory = AssocDataFactory{item.AssociatedData} - } - - var managedKeyFactory ManagedKeyFactory - if p.Type == keysutil.KeyType_MANAGED_KEY { - managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) - if !ok { - batchResponseItems[i].Error = errors.New("unsupported system view").Error() - } - - managedKeyFactory = ManagedKeyFactory{ - managedKeyParams: keysutil.ManagedKeyParameters{ - ManagedKeySystemView: managedKeySystemView, - BackendUUID: b.backendUUID, - Context: ctx, 
- }, - } - } - - ciphertext, err := p.EncryptWithFactory(item.KeyVersion, item.DecodedContext, item.DecodedNonce, item.Plaintext, factory, managedKeyFactory) + ciphertext, err := p.Encrypt(item.KeyVersion, item.DecodedContext, item.DecodedNonce, item.Plaintext) if err != nil { switch err.(type) { case errutil.InternalError: @@ -531,10 +432,6 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d resp := &logical.Response{} if batchInputRaw != nil { - // Copy the references - for i := range batchInputItems { - batchResponseItems[i].Reference = batchInputItems[i].Reference - } resp.Data = map[string]interface{}{ "batch_results": batchResponseItems, } diff --git a/builtin/logical/transit/path_encrypt_bench_test.go b/builtin/logical/transit/path_encrypt_bench_test.go index 8aef39dd4b724..e648c6e02fc32 100644 --- a/builtin/logical/transit/path_encrypt_bench_test.go +++ b/builtin/logical/transit/path_encrypt_bench_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/path_encrypt_test.go b/builtin/logical/transit/path_encrypt_test.go index b886d4fefd8f6..734a92524b558 100644 --- a/builtin/logical/transit/path_encrypt_test.go +++ b/builtin/logical/transit/path_encrypt_test.go @@ -1,19 +1,14 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( "context" "encoding/json" - "fmt" "reflect" "strings" "testing" "github.com/hashicorp/vault/sdk/helper/keysutil" - uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) @@ -230,7 +225,7 @@ func TestTransit_BatchEncryptionCase3(t *testing.T) { } } -// Case4: Test batch encryption with an existing key (and test references) +// Case4: Test batch encryption with an existing key func TestTransit_BatchEncryptionCase4(t *testing.T) { var resp *logical.Response var err error @@ -248,8 +243,8 @@ func TestTransit_BatchEncryptionCase4(t *testing.T) { } batchInput := []interface{}{ - map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "b"}, - map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "a"}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, } batchData := map[string]interface{}{ @@ -276,7 +271,7 @@ func TestTransit_BatchEncryptionCase4(t *testing.T) { plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" - for i, item := range batchResponseItems { + for _, item := range batchResponseItems { if item.KeyVersion != 1 { t.Fatalf("unexpected key version; got: %d, expected: %d", item.KeyVersion, 1) } @@ -292,10 +287,6 @@ func TestTransit_BatchEncryptionCase4(t *testing.T) { if resp.Data["plaintext"] != plaintext { t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"]) } - inputItem := batchInput[i].(map[string]interface{}) - if item.Reference != inputItem["reference"] { - t.Fatalf("reference mismatch. 
Expected %s, Actual: %s", inputItem["reference"], item.Reference) - } } } @@ -946,48 +937,3 @@ func TestShouldWarnAboutNonceUsage(t *testing.T) { } } } - -func TestTransit_EncryptWithRSAPublicKey(t *testing.T) { - generateKeys(t) - b, s := createBackendWithStorage(t) - keyType := "rsa-2048" - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get key - privateKey := getKey(t, keyType) - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatal(err) - } - - // Import key - req := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - "type": keyType, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import public key: %s", err) - } - - req = &logical.Request{ - Operation: logical.CreateOperation, - Path: fmt.Sprintf("encrypt/%s", keyID), - Storage: s, - Data: map[string]interface{}{ - "plaintext": "bXkgc2VjcmV0IGRhdGE=", - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatal(err) - } -} diff --git a/builtin/logical/transit/path_export.go b/builtin/logical/transit/path_export.go index 9239aee94bd4d..61bb5e595ca66 100644 --- a/builtin/logical/transit/path_export.go +++ b/builtin/logical/transit/path_export.go @@ -1,12 +1,10 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( "context" "crypto/ecdsa" "crypto/elliptic" + "crypto/rsa" "crypto/x509" "encoding/base64" "encoding/pem" @@ -24,23 +22,15 @@ const ( exportTypeEncryptionKey = "encryption-key" exportTypeSigningKey = "signing-key" exportTypeHMACKey = "hmac-key" - exportTypePublicKey = "public-key" ) func (b *backend) pathExportKeys() *framework.Path { return &framework.Path{ Pattern: "export/" + framework.GenericNameRegex("type") + "/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("version"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "export", - OperationSuffix: "key|key-version", - }, - Fields: map[string]*framework.FieldSchema{ "type": { Type: framework.TypeString, - Description: "Type of key to export (encryption-key, signing-key, hmac-key, public-key)", + Description: "Type of key to export (encryption-key, signing-key, hmac-key)", }, "name": { Type: framework.TypeString, @@ -70,7 +60,6 @@ func (b *backend) pathPolicyExportRead(ctx context.Context, req *logical.Request case exportTypeEncryptionKey: case exportTypeSigningKey: case exportTypeHMACKey: - case exportTypePublicKey: default: return logical.ErrorResponse(fmt.Sprintf("invalid export type: %s", exportType)), logical.ErrInvalidRequest } @@ -90,8 +79,8 @@ func (b *backend) pathPolicyExportRead(ctx context.Context, req *logical.Request } defer p.Unlock() - if !p.Exportable && exportType != exportTypePublicKey { - return logical.ErrorResponse("private key material is not exportable"), nil + if !p.Exportable { + return logical.ErrorResponse("key is not exportable"), nil } switch exportType { @@ -174,11 +163,7 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: - rsaKey, err := 
encodeRSAPrivateKey(key) - if err != nil { - return "", err - } - return rsaKey, nil + return encodeRSAPrivateKey(key.RSAKey), nil } case exportTypeSigningKey: @@ -200,92 +185,26 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st return ecKey, nil case keysutil.KeyType_ED25519: - if len(key.Key) == 0 { - return "", nil - } - return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: - rsaKey, err := encodeRSAPrivateKey(key) - if err != nil { - return "", err - } - return rsaKey, nil - } - case exportTypePublicKey: - switch policy.Type { - case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521: - var curve elliptic.Curve - switch policy.Type { - case keysutil.KeyType_ECDSA_P384: - curve = elliptic.P384() - case keysutil.KeyType_ECDSA_P521: - curve = elliptic.P521() - default: - curve = elliptic.P256() - } - ecKey, err := keyEntryToECPublicKey(key, curve) - if err != nil { - return "", err - } - return ecKey, nil - - case keysutil.KeyType_ED25519: - return strings.TrimSpace(key.FormattedPublicKey), nil - - case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: - rsaKey, err := encodeRSAPublicKey(key) - if err != nil { - return "", err - } - return rsaKey, nil + return encodeRSAPrivateKey(key.RSAKey), nil } } - return "", fmt.Errorf("unknown key type %v for export type %v", policy.Type, exportType) + return "", fmt.Errorf("unknown key type %v", policy.Type) } -func encodeRSAPrivateKey(key *keysutil.KeyEntry) (string, error) { - if key == nil { - return "", errors.New("nil KeyEntry provided") - } - - if key.IsPrivateKeyMissing() { - return "", nil - } - +func encodeRSAPrivateKey(key *rsa.PrivateKey) string { // When encoding PKCS1, the PEM header should be `RSA PRIVATE KEY`. When Go // has PKCS8 encoding support, we may want to change this. 
- blockType := "RSA PRIVATE KEY" - derBytes := x509.MarshalPKCS1PrivateKey(key.RSAKey) - pemBlock := pem.Block{ - Type: blockType, + derBytes := x509.MarshalPKCS1PrivateKey(key) + pemBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", Bytes: derBytes, } - - pemBytes := pem.EncodeToMemory(&pemBlock) - return string(pemBytes), nil -} - -func encodeRSAPublicKey(key *keysutil.KeyEntry) (string, error) { - if key == nil { - return "", errors.New("nil KeyEntry provided") - } - - blockType := "RSA PUBLIC KEY" - derBytes, err := x509.MarshalPKIXPublicKey(key.RSAPublicKey) - if err != nil { - return "", err - } - - pemBlock := pem.Block{ - Type: blockType, - Bytes: derBytes, - } - - pemBytes := pem.EncodeToMemory(&pemBlock) - return string(pemBytes), nil + pemBytes := pem.EncodeToMemory(pemBlock) + return string(pemBytes) } func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) { @@ -293,57 +212,27 @@ func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, return "", errors.New("nil KeyEntry provided") } - if k.IsPrivateKeyMissing() { - return "", nil - } - - pubKey := ecdsa.PublicKey{ - Curve: curve, - X: k.EC_X, - Y: k.EC_Y, - } - - blockType := "EC PRIVATE KEY" privKey := &ecdsa.PrivateKey{ - PublicKey: pubKey, - D: k.EC_D, + PublicKey: ecdsa.PublicKey{ + Curve: curve, + X: k.EC_X, + Y: k.EC_Y, + }, + D: k.EC_D, } - derBytes, err := x509.MarshalECPrivateKey(privKey) + ecder, err := x509.MarshalECPrivateKey(privKey) if err != nil { return "", err } - - pemBlock := pem.Block{ - Type: blockType, - Bytes: derBytes, - } - - return strings.TrimSpace(string(pem.EncodeToMemory(&pemBlock))), nil -} - -func keyEntryToECPublicKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) { - if k == nil { - return "", errors.New("nil KeyEntry provided") - } - - pubKey := ecdsa.PublicKey{ - Curve: curve, - X: k.EC_X, - Y: k.EC_Y, + if ecder == nil { + return "", errors.New("no data returned when marshalling to private key") } - 
blockType := "PUBLIC KEY" - derBytes, err := x509.MarshalPKIXPublicKey(&pubKey) - if err != nil { - return "", err + block := pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: ecder, } - - pemBlock := pem.Block{ - Type: blockType, - Bytes: derBytes, - } - - return strings.TrimSpace(string(pem.EncodeToMemory(&pemBlock))), nil + return strings.TrimSpace(string(pem.EncodeToMemory(&block))), nil } const pathExportHelpSyn = `Export named encryption or signing key` diff --git a/builtin/logical/transit/path_export_test.go b/builtin/logical/transit/path_export_test.go index cd25383f2b060..b44a4e7e0fe26 100644 --- a/builtin/logical/transit/path_export_test.go +++ b/builtin/logical/transit/path_export_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/path_hash.go b/builtin/logical/transit/path_hash.go index ecf619a52e448..51ca37daa2317 100644 --- a/builtin/logical/transit/path_hash.go +++ b/builtin/logical/transit/path_hash.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -21,13 +18,6 @@ import ( func (b *backend) pathHash() *framework.Path { return &framework.Path{ Pattern: "hash" + framework.OptionalParamRegex("urlalgorithm"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "hash", - OperationSuffix: "|with-algorithm", - }, - Fields: map[string]*framework.FieldSchema{ "input": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_hash_test.go b/builtin/logical/transit/path_hash_test.go index 084012dd4bafe..3e5dce95c2999 100644 --- a/builtin/logical/transit/path_hash_test.go +++ b/builtin/logical/transit/path_hash_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/path_hmac.go b/builtin/logical/transit/path_hmac.go index 704a1b866cfff..6639231174ba5 100644 --- a/builtin/logical/transit/path_hmac.go +++ b/builtin/logical/transit/path_hmac.go @@ -1,13 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( "context" "crypto/hmac" "encoding/base64" - "errors" "fmt" "strconv" "strings" @@ -39,22 +35,11 @@ type batchResponseHMACItem struct { // For batch processing to successfully mimic previous handling for simple 'input', // both output values are needed - though 'err' should never be serialized. err error - - // Reference is an arbitrary caller supplied string value that will be placed on the - // batch response to ease correlation between inputs and outputs - Reference string `json:"reference" mapstructure:"reference"` } func (b *backend) pathHMAC() *framework.Path { return &framework.Path{ Pattern: "hmac/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "generate", - OperationSuffix: "hmac|hmac-with-algorithm", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -154,7 +139,7 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr p.Unlock() return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - if key == nil && p.Type != keysutil.KeyType_MANAGED_KEY { + if key == nil { p.Unlock() return nil, fmt.Errorf("HMAC key value could not be computed") } @@ -210,23 +195,9 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr continue } - var retBytes []byte - - if p.Type == keysutil.KeyType_MANAGED_KEY { - managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) - if !ok { - response[i].err = errors.New("unsupported 
system view") - } - - retBytes, err = p.HMACWithManagedKey(ctx, ver, managedKeySystemView, b.backendUUID, algorithm, input) - if err != nil { - response[i].err = err - } - } else { - hf := hmac.New(hashAlg, key) - hf.Write(input) - retBytes = hf.Sum(nil) - } + hf := hmac.New(hashAlg, key) + hf.Write(input) + retBytes := hf.Sum(nil) retStr := base64.StdEncoding.EncodeToString(retBytes) retStr = fmt.Sprintf("vault:v%s:%s", strconv.Itoa(ver), retStr) @@ -238,10 +209,6 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr // Generate the response resp := &logical.Response{} if batchInputRaw != nil { - // Copy the references - for i := range batchInputItems { - response[i].Reference = batchInputItems[i]["reference"] - } resp.Data = map[string]interface{}{ "batch_results": response, } @@ -403,10 +370,6 @@ func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *f // Generate the response resp := &logical.Response{} if batchInputRaw != nil { - // Copy the references - for i := range batchInputItems { - response[i].Reference = batchInputItems[i]["reference"] - } resp.Data = map[string]interface{}{ "batch_results": response, } diff --git a/builtin/logical/transit/path_hmac_test.go b/builtin/logical/transit/path_hmac_test.go index af98dd2ca081a..16dbb1a4903b1 100644 --- a/builtin/logical/transit/path_hmac_test.go +++ b/builtin/logical/transit/path_hmac_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -251,18 +248,18 @@ func TestTransit_batchHMAC(t *testing.T) { req.Path = "hmac/foo" batchInput := []batchRequestHMACItem{ - {"input": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "one"}, - {"input": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "two"}, - {"input": "", "reference": "three"}, - {"input": ":;.?", "reference": "four"}, + {"input": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + {"input": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + {"input": ""}, + {"input": ":;.?"}, {}, } expected := []batchResponseHMACItem{ - {HMAC: "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=", Reference: "one"}, - {HMAC: "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=", Reference: "two"}, - {HMAC: "vault:v1:BCfVv6rlnRsIKpjCZCxWvh5iYwSSabRXpX9XJniuNgc=", Reference: "three"}, - {Error: "unable to decode input as base64: illegal base64 data at input byte 0", Reference: "four"}, + {HMAC: "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4="}, + {HMAC: "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4="}, + {HMAC: "vault:v1:BCfVv6rlnRsIKpjCZCxWvh5iYwSSabRXpX9XJniuNgc="}, + {Error: "unable to decode input as base64: illegal base64 data at input byte 0"}, {Error: "missing input for HMAC"}, } @@ -289,9 +286,6 @@ func TestTransit_batchHMAC(t *testing.T) { if expected[i].Error != "" && expected[i].Error != m.Error { t.Fatalf("Expected Error %q got %q in result %d", expected[i].Error, m.Error, i) } - if expected[i].Reference != m.Reference { - t.Fatalf("Expected references to match, Got %s, Expected %s", m.Reference, expected[i].Reference) - } } // Verify a previous version diff --git a/builtin/logical/transit/path_import.go b/builtin/logical/transit/path_import.go index 45cb4dd108f22..817cf5fc5ddf2 100644 --- a/builtin/logical/transit/path_import.go +++ b/builtin/logical/transit/path_import.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -28,13 +25,6 @@ const EncryptedKeyBytes = 512 func (b *backend) pathImport() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/import", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "import", - OperationSuffix: "key", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -59,10 +49,6 @@ ephemeral AES key. Can be one of "SHA1", "SHA224", "SHA256" (default), "SHA384", Description: `The base64-encoded ciphertext of the keys. The AES key should be encrypted using OAEP with the wrapping key and then concatenated with the import key, wrapped by the AES key.`, }, - "public_key": { - Type: framework.TypeString, - Description: `The plaintext PEM public key to be imported. If "ciphertext" is set, this field is ignored.`, - }, "allow_rotation": { Type: framework.TypeBool, Description: "True if the imported key may be rotated within Vault; false otherwise.", @@ -115,13 +101,6 @@ key.`, func (b *backend) pathImportVersion() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/import_version", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "import", - OperationSuffix: "key-version", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -132,21 +111,12 @@ func (b *backend) pathImportVersion() *framework.Path { Description: `The base64-encoded ciphertext of the keys. The AES key should be encrypted using OAEP with the wrapping key and then concatenated with the import key, wrapped by the AES key.`, }, - "public_key": { - Type: framework.TypeString, - Description: `The plaintext public key to be imported. 
If "ciphertext" is set, this field is ignored.`, - }, "hash_function": { Type: framework.TypeString, Default: "SHA256", Description: `The hash function used as a random oracle in the OAEP wrapping of the user-generated, ephemeral AES key. Can be one of "SHA1", "SHA224", "SHA256" (default), "SHA384", or "SHA512"`, }, - "version": { - Type: framework.TypeInt, - Description: `Key version to be updated, if left empty, a new version will be created unless -a private key is specified and the 'Latest' key is missing a private key.`, - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathImportVersionWrite, @@ -160,9 +130,11 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d * name := d.Get("name").(string) derived := d.Get("derived").(bool) keyType := d.Get("type").(string) + hashFnStr := d.Get("hash_function").(string) exportable := d.Get("exportable").(bool) allowPlaintextBackup := d.Get("allow_plaintext_backup").(bool) autoRotatePeriod := time.Second * time.Duration(d.Get("auto_rotate_period").(int)) + ciphertextString := d.Get("ciphertext").(string) allowRotation := d.Get("allow_rotation").(bool) // Ensure the caller didn't supply "convergent_encryption" as a field, since it's not supported on import. 
@@ -174,12 +146,6 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d * return nil, errors.New("allow_rotation must be set to true if auto-rotation is enabled") } - // Ensure that at least on `key` field has been set - isCiphertextSet, err := checkKeyFieldsSet(d) - if err != nil { - return nil, err - } - polReq := keysutil.PolicyRequest{ Storage: req.Storage, Name: name, @@ -188,7 +154,6 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d * AllowPlaintextBackup: allowPlaintextBackup, AutoRotatePeriod: autoRotatePeriod, AllowImportedKeyRotation: allowRotation, - IsPrivateKey: isCiphertextSet, } switch strings.ToLower(keyType) { @@ -218,6 +183,11 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d * return logical.ErrorResponse(fmt.Sprintf("unknown key type: %v", keyType)), logical.ErrInvalidRequest } + hashFn, err := parseHashFn(hashFnStr) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) if err != nil { return nil, err @@ -230,9 +200,14 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d * return nil, errors.New("the import path cannot be used with an existing key; use import-version to rotate an existing imported key") } - key, resp, err := b.extractKeyFromFields(ctx, req, d, polReq.KeyType, isCiphertextSet) + ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString) if err != nil { - return resp, err + return nil, err + } + + key, err := b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) + if err != nil { + return nil, err } err = b.lm.ImportPolicy(ctx, polReq, key, b.GetRandomReader()) @@ -245,18 +220,20 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d * func (b *backend) pathImportVersionWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { 
name := d.Get("name").(string) + hashFnStr := d.Get("hash_function").(string) + ciphertextString := d.Get("ciphertext").(string) - isCiphertextSet, err := checkKeyFieldsSet(d) - if err != nil { - return nil, err + polReq := keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + Upsert: false, } - polReq := keysutil.PolicyRequest{ - Storage: req.Storage, - Name: name, - Upsert: false, - IsPrivateKey: isCiphertextSet, + hashFn, err := parseHashFn(hashFnStr) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } + p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) if err != nil { return nil, err @@ -276,24 +253,15 @@ func (b *backend) pathImportVersionWrite(ctx context.Context, req *logical.Reque } defer p.Unlock() - key, resp, err := b.extractKeyFromFields(ctx, req, d, p.Type, isCiphertextSet) + ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString) if err != nil { - return resp, err + return nil, err } - - // Get param version if set else import a new version. 
- if version, ok := d.GetOk("version"); ok { - versionToUpdate := version.(int) - - // Check if given version can be updated given input - err = p.KeyVersionCanBeUpdated(versionToUpdate, isCiphertextSet) - if err == nil { - err = p.ImportPrivateKeyForVersion(ctx, req.Storage, versionToUpdate, key) - } - } else { - err = p.ImportPublicOrPrivate(ctx, req.Storage, key, isCiphertextSet, b.GetRandomReader()) + importKey, err := b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) + if err != nil { + return nil, err } - + err = p.Import(ctx, req.Storage, importKey, b.GetRandomReader()) if err != nil { return nil, err } @@ -351,36 +319,6 @@ func (b *backend) decryptImportedKey(ctx context.Context, storage logical.Storag return importKey, nil } -func (b *backend) extractKeyFromFields(ctx context.Context, req *logical.Request, d *framework.FieldData, keyType keysutil.KeyType, isPrivateKey bool) ([]byte, *logical.Response, error) { - var key []byte - if isPrivateKey { - hashFnStr := d.Get("hash_function").(string) - hashFn, err := parseHashFn(hashFnStr) - if err != nil { - return key, logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest - } - - ciphertextString := d.Get("ciphertext").(string) - ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString) - if err != nil { - return key, nil, err - } - - key, err = b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) - if err != nil { - return key, nil, err - } - } else { - publicKeyString := d.Get("public_key").(string) - if !keyType.ImportPublicKeySupported() { - return key, nil, errors.New("provided type does not support public_key import") - } - key = []byte(publicKeyString) - } - - return key, nil, nil -} - func parseHashFn(hashFn string) (hash.Hash, error) { switch strings.ToUpper(hashFn) { case "SHA1": @@ -398,29 +336,6 @@ func parseHashFn(hashFn string) (hash.Hash, error) { } } -// checkKeyFieldsSet: Checks which key fields are set. 
If both are set, an error is returned -func checkKeyFieldsSet(d *framework.FieldData) (bool, error) { - ciphertextSet := isFieldSet("ciphertext", d) - publicKeySet := isFieldSet("publicKey", d) - - if ciphertextSet && publicKeySet { - return false, errors.New("only one of the following fields, ciphertext and public_key, can be set") - } else if ciphertextSet { - return true, nil - } else { - return false, nil - } -} - -func isFieldSet(fieldName string, d *framework.FieldData) bool { - _, fieldSet := d.Raw[fieldName] - if !fieldSet { - return false - } - - return true -} - const ( pathImportWriteSyn = "Imports an externally-generated key into a new transit key" pathImportWriteDesc = "This path is used to import an externally-generated " + diff --git a/builtin/logical/transit/path_import_test.go b/builtin/logical/transit/path_import_test.go index cb59e8de27d7a..d31b12b454e4c 100644 --- a/builtin/logical/transit/path_import_test.go +++ b/builtin/logical/transit/path_import_test.go @@ -1,11 +1,7 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( "context" - "crypto" "crypto/ecdsa" "crypto/ed25519" "crypto/elliptic" @@ -13,7 +9,6 @@ import ( "crypto/rsa" "crypto/x509" "encoding/base64" - "encoding/pem" "fmt" "strconv" "sync" @@ -51,10 +46,7 @@ var ( keys = map[string]interface{}{} ) -const ( - nssFormattedEd25519Key = "MGcCAQAwFAYHKoZIzj0CAQYJKwYBBAHaRw8BBEwwSgIBAQQgfJm5R+LK4FMwGzOpemTBXksimEVOVCE8QeC+XBBfNU+hIwMhADaif7IhYx46IHcRTy1z8LeyhABep+UB8Da6olMZGx0i" - rsaPSSFormattedKey = "MIIEvAIBADALBgkqhkiG9w0BAQoEggSoMIIEpAIBAAKCAQEAiFXSBaicB534+2qMZTVzQHMjuhb4NM9hi5H4EAFiYHEBuvm2BAk58NdBK3wiMq/p7Ewu5NQI0gJ7GlcV1MBU94U6MEmWNd0ztmlz37esEDuaCDhmLEBHKRzs8Om0bY9vczcNwcnRIYusP2KMxon3Gv2C86M2Jahig70AIq0E9C7esfrlYxFnoxUfO09XyYfiHlZY59+/dhyULp/RDIvaQ0/DqSSnYmXw8vRQ1gp6DqIzxx3j8ikUrpE7MK6348keFQj1eb83Z5w8qgIdceHHH4wbIAW7qWCPJ/vIJp8Pe1NEanlef61pDut2YcljvN79ccjX/QyqwqYv6xX2uzSlpQIDAQABAoIBACtpBCAoIVJtkv9e3EhHniR55PjWYn7SP5GEz3MtNalWokHqS/H6DBhrOcWCV5NDHx1N3qqe9xYDkzX+X6Wn/gX4RmBkte79uX8OEca8wY1DpRaT+riBWQc2vh0xlPFDuC177KX1QGFJi3V9SCzZdjSCXyV7pPyVopSm4/mmlMq5ANfN8bcHAtcArP7vPzEdckJqurjwHyzsUZJa9sk3OL3rBkKy5bmoPebE1ZQ7C+9eA4u9MKSy95WpTiqMe3rRhvr6zj4bzEvzS9M4r2EdwgAn4FyDwtGdOqtfbtSLTikb73f4MSINnWbt3YPBfRC4PGjWXIN2sMG5XYC3KH+RKbsCgYEAu0HOFInH8OtWiUY0aqRKZuo7lrBczNa5gnce3ZYnNkfrPlu1Xp0SjUkEWukznBLO0N9lvG9j3ksUDTQlPoKarJb9uf/1H0tYHhHm6mP8mH87yfVn2bLb3VPeIQYb+MXnDrwNVCAtxhuHlpnXJPldeuVKeRigHUNIEs76UMiiLqMCgYEAumJxm5NrKk0LXUQmeZolLh0lM/shg8zW7Vi3Ksz5Pe4Pcmg+hTbHjZuJwK6HesljEA0JDNkS0+5hkqiS5UDnj94XfDbi08/kKbPYA12GPVSRNTJxL8q70rFnEUZuMBeL0SKMPhEfR2z5TDDZUBoO6HBUUwgJAij1EsXrBAb0BxcCgYBKS3eKKohLi/PPjy0oynpCjtiJlvuawe7kVoLGg9aW8L3jBdvV6Bf+OmQh9bhmSggIUzo4IzHKdptECdZlEMhxhY6xh14nxmr1s0Cc6oLDtmdwX4+OjioxjB7rl1Ltxwc/j1jycbn3ieCn3e3AW7e9FNARb7XHJnSoEbq65n+CZQKBgQChLPozYAL/HIrkR0fCRmM6gmemkNeFo0CFFP+oWoJ6ZIAlHjJafmmIcmVoI0TzEG3C9pLJ8nmOnYjxCyekakEUryi9+LSkGBWlXmlBV8H7DUNYrlskyfssEs8fKDmnCuWUn3yJO8NBv+HBWkjCNRaJOIIjH0KzBHoRludJnz2tVwKBgQCsQF5lvcXefNfQojbhF+9NfyhvAc7EsMTXQhP9HEj0
wVqTuuqyGyu8meXEkcQPRl6yD/yZKuMREDNNck4KV2fdGekBsh8zBgpxdHQ2DcbfxZfNgv3yoX3f0grb/ApQNJb3DVW9FVRigue8XPzFOFX/demJmkUnTg3zGFnXLXjgxg==" -) +const nssFormattedEd25519Key = "MGcCAQAwFAYHKoZIzj0CAQYJKwYBBAHaRw8BBEwwSgIBAQQgfJm5R+LK4FMwGzOpemTBXksimEVOVCE8QeC+XBBfNU+hIwMhADaif7IhYx46IHcRTy1z8LeyhABep+UB8Da6olMZGx0i" func generateKeys(t *testing.T) { t.Helper() @@ -122,39 +114,6 @@ func TestTransit_ImportNSSEd25519Key(t *testing.T) { } } -func TestTransit_ImportRSAPSS(t *testing.T) { - generateKeys(t) - b, s := createBackendWithStorage(t) - - wrappingKey, err := b.getWrappingKey(context.Background(), s) - if err != nil || wrappingKey == nil { - t.Fatalf("failed to retrieve public wrapping key: %s", err) - } - privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey - pubWrappingKey := &privWrappingKey.PublicKey - - rawPKCS8, err := base64.StdEncoding.DecodeString(rsaPSSFormattedKey) - if err != nil { - t.Fatalf("failed to parse rsa-pss base64: %v", err) - } - - blob := wrapTargetPKCS8ForImport(t, pubWrappingKey, rawPKCS8, "SHA256") - req := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: "keys/rsa-pss/import", - Data: map[string]interface{}{ - "ciphertext": blob, - "type": "rsa-2048", - }, - } - - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import RSA-PSS private key: %v", err) - } -} - func TestTransit_Import(t *testing.T) { generateKeys(t) b, s := createBackendWithStorage(t) @@ -429,70 +388,6 @@ func TestTransit_Import(t *testing.T) { } }, ) - - t.Run( - "import public key ed25519", - func(t *testing.T) { - keyType := "ed25519" - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get keys - privateKey := getKey(t, keyType) - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatal(err) - } - - // Import key - req := &logical.Request{ - Storage: s, - Operation: 
logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - "type": keyType, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import ed25519 key: %v", err) - } - }) - - t.Run( - "import public key ecdsa", - func(t *testing.T) { - keyType := "ecdsa-p256" - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get keys - privateKey := getKey(t, keyType) - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatal(err) - } - - // Import key - req := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - "type": keyType, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import public key: %s", err) - } - }) } func TestTransit_ImportVersion(t *testing.T) { @@ -639,313 +534,6 @@ func TestTransit_ImportVersion(t *testing.T) { } }, ) - - t.Run( - "import rsa public key and update version with private counterpart", - func(t *testing.T) { - keyType := "rsa-2048" - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get keys - privateKey := getKey(t, keyType) - importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatal(err) - } - - // Import RSA public key - req := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - "type": keyType, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import public key: %s", err) - } - - // Update version - 
import RSA private key - req = &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import_version", keyID), - Data: map[string]interface{}{ - "ciphertext": importBlob, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to update key: %s", err) - } - }, - ) -} - -func TestTransit_ImportVersionWithPublicKeys(t *testing.T) { - generateKeys(t) - b, s := createBackendWithStorage(t) - - // Retrieve public wrapping key - wrappingKey, err := b.getWrappingKey(context.Background(), s) - if err != nil || wrappingKey == nil { - t.Fatalf("failed to retrieve public wrapping key: %s", err) - } - privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey - pubWrappingKey := &privWrappingKey.PublicKey - - // Import a public key then import private should give us one key - t.Run( - "import rsa public key and update version with private counterpart", - func(t *testing.T) { - keyType := "ecdsa-p256" - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get keys - privateKey := getKey(t, keyType) - importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatal(err) - } - - // Import EC public key - req := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - "type": keyType, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import public key: %s", err) - } - - // Update version - import EC private key - req = &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import_version", keyID), - Data: map[string]interface{}{ - "ciphertext": importBlob, - }, - } - _, err = 
b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to update key: %s", err) - } - - // We should have one key on export - req = &logical.Request{ - Storage: s, - Operation: logical.ReadOperation, - Path: fmt.Sprintf("export/public-key/%s", keyID), - } - resp, err := b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to export key: %s", err) - } - - if len(resp.Data["keys"].(map[string]string)) != 1 { - t.Fatalf("expected 1 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) - } - }, - ) - - // Import a private and then public should give us two keys - t.Run( - "import ec private key and then its public counterpart", - func(t *testing.T) { - keyType := "ecdsa-p256" - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get keys - privateKey := getKey(t, keyType) - importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") - publicKeyBytes, err := getPublicKey(privateKey, keyType) - if err != nil { - t.Fatal(err) - } - - // Import EC private key - req := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "ciphertext": importBlob, - "type": keyType, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to update key: %s", err) - } - - // Update version - Import EC public key - req = &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import_version", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import public key: %s", err) - } - - // We should have two keys on export - req = &logical.Request{ - Storage: s, - Operation: logical.ReadOperation, - Path: fmt.Sprintf("export/public-key/%s", 
keyID), - } - resp, err := b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to export key: %s", err) - } - - if len(resp.Data["keys"].(map[string]string)) != 2 { - t.Fatalf("expected 2 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) - } - }, - ) - - // Import a public and another public should allow us to insert two private key. - t.Run( - "import two public keys and two private keys in reverse order", - func(t *testing.T) { - keyType := "ecdsa-p256" - keyID, err := uuid.GenerateUUID() - if err != nil { - t.Fatalf("failed to generate key ID: %s", err) - } - - // Get keys - privateKey1 := getKey(t, keyType) - importBlob1 := wrapTargetKeyForImport(t, pubWrappingKey, privateKey1, keyType, "SHA256") - publicKeyBytes1, err := getPublicKey(privateKey1, keyType) - if err != nil { - t.Fatal(err) - } - - privateKey2, err := generateKey(keyType) - if err != nil { - t.Fatal(err) - } - importBlob2 := wrapTargetKeyForImport(t, pubWrappingKey, privateKey2, keyType, "SHA256") - publicKeyBytes2, err := getPublicKey(privateKey2, keyType) - if err != nil { - t.Fatal(err) - } - - // Import EC public key - req := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes1, - "type": keyType, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to update key: %s", err) - } - - // Update version - Import second EC public key - req = &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import_version", keyID), - Data: map[string]interface{}{ - "public_key": publicKeyBytes2, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import public key: %s", err) - } - - // We should have two keys on export - req = &logical.Request{ - Storage: s, - Operation: logical.ReadOperation, - 
Path: fmt.Sprintf("export/public-key/%s", keyID), - } - resp, err := b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to export key: %s", err) - } - - if len(resp.Data["keys"].(map[string]string)) != 2 { - t.Fatalf("expected 2 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) - } - - // Import second private key first, with no options. - req = &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import_version", keyID), - Data: map[string]interface{}{ - "ciphertext": importBlob2, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import private key: %s", err) - } - - // Import first private key second, with a version - req = &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s/import_version", keyID), - Data: map[string]interface{}{ - "ciphertext": importBlob1, - "version": 1, - }, - } - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import private key: %s", err) - } - - // We should still have two keys on export - req = &logical.Request{ - Storage: s, - Operation: logical.ReadOperation, - Path: fmt.Sprintf("export/public-key/%s", keyID), - } - resp, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to export key: %s", err) - } - - if len(resp.Data["keys"].(map[string]string)) != 2 { - t.Fatalf("expected 2 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) - } - }, - ) } func wrapTargetKeyForImport(t *testing.T, wrappingKey *rsa.PublicKey, targetKey interface{}, targetKeyType string, hashFnName string) string { @@ -1036,40 +624,3 @@ func generateKey(keyType string) (interface{}, error) { return nil, fmt.Errorf("failed to generate unsupported key type: %s", keyType) } } - -func getPublicKey(privateKey crypto.PrivateKey, keyType string) ([]byte, error) { - var publicKey 
crypto.PublicKey - var publicKeyBytes []byte - switch keyType { - case "rsa-2048", "rsa-3072", "rsa-4096": - publicKey = privateKey.(*rsa.PrivateKey).Public() - case "ecdsa-p256", "ecdsa-p384", "ecdsa-p521": - publicKey = privateKey.(*ecdsa.PrivateKey).Public() - case "ed25519": - publicKey = privateKey.(ed25519.PrivateKey).Public() - default: - return publicKeyBytes, fmt.Errorf("failed to get public key from %s key", keyType) - } - - publicKeyBytes, err := publicKeyToBytes(publicKey) - if err != nil { - return publicKeyBytes, err - } - - return publicKeyBytes, nil -} - -func publicKeyToBytes(publicKey crypto.PublicKey) ([]byte, error) { - var publicKeyBytesPem []byte - publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) - if err != nil { - return publicKeyBytesPem, fmt.Errorf("failed to marshal public key: %s", err) - } - - pemBlock := &pem.Block{ - Type: "PUBLIC KEY", - Bytes: publicKeyBytes, - } - - return pem.EncodeToMemory(pemBlock), nil -} diff --git a/builtin/logical/transit/path_keys.go b/builtin/logical/transit/path_keys.go index 285f32d33cdce..e8edabc1769c5 100644 --- a/builtin/logical/transit/path_keys.go +++ b/builtin/logical/transit/path_keys.go @@ -1,11 +1,7 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( "context" - "crypto" "crypto/elliptic" "crypto/x509" "encoding/base64" @@ -26,11 +22,6 @@ func (b *backend) pathListKeys() *framework.Path { return &framework.Path{ Pattern: "keys/?$", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationSuffix: "keys", - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathKeysList, }, @@ -43,12 +34,6 @@ func (b *backend) pathListKeys() *framework.Path { func (b *backend) pathKeys() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationSuffix: "key", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -123,35 +108,12 @@ key.`, Default: 0, Description: fmt.Sprintf("The key size in bytes for the algorithm. Only applies to HMAC and must be no fewer than %d bytes and no more than %d", keysutil.HmacMinKeySize, keysutil.HmacMaxKeySize), }, - "managed_key_name": { - Type: framework.TypeString, - Description: "The name of the managed key to use for this transit key", - }, - "managed_key_id": { - Type: framework.TypeString, - Description: "The UUID of the managed key to use for this transit key", - }, }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathPolicyWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "create", - }, - }, - logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathPolicyDelete, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "delete", - }, - }, - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathPolicyRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "read", - }, - }, + Callbacks: 
map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathPolicyWrite, + logical.DeleteOperation: b.pathPolicyDelete, + logical.ReadOperation: b.pathPolicyRead, }, HelpSynopsis: pathPolicyHelpSyn, @@ -177,8 +139,6 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * exportable := d.Get("exportable").(bool) allowPlaintextBackup := d.Get("allow_plaintext_backup").(bool) autoRotatePeriod := time.Second * time.Duration(d.Get("auto_rotate_period").(int)) - managedKeyName := d.Get("managed_key_name").(string) - managedKeyId := d.Get("managed_key_id").(string) if autoRotatePeriod != 0 && autoRotatePeriod < time.Hour { return logical.ErrorResponse("auto rotate period must be 0 to disable or at least an hour"), nil @@ -222,8 +182,6 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * polReq.KeyType = keysutil.KeyType_RSA4096 case "hmac": polReq.KeyType = keysutil.KeyType_HMAC - case "managed_key": - polReq.KeyType = keysutil.KeyType_MANAGED_KEY default: return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest } @@ -237,15 +195,6 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * polReq.KeySize = keySize } - if polReq.KeyType == keysutil.KeyType_MANAGED_KEY { - keyId, err := GetManagedKeyUUID(ctx, b, managedKeyName, managedKeyId) - if err != nil { - return nil, err - } - - polReq.ManagedKeyUUID = keyId - } - p, upserted, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) if err != nil { return nil, err @@ -257,14 +206,12 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * p.Unlock() } - resp, err := b.formatKeyPolicy(p, nil) - if err != nil { - return nil, err - } + resp := &logical.Response{} if !upserted { resp.AddWarning(fmt.Sprintf("key %s already existed", name)) } - return resp, nil + + return nil, nil } // Built-in helper type for returning asymmetric keys @@ -292,19 +239,6 @@ 
func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f } defer p.Unlock() - contextRaw := d.Get("context").(string) - var context []byte - if len(contextRaw) != 0 { - context, err = base64.StdEncoding.DecodeString(contextRaw) - if err != nil { - return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest - } - } - - return b.formatKeyPolicy(p, context) -} - -func (b *backend) formatKeyPolicy(p *keysutil.Policy, context []byte) (*logical.Response, error) { // Return the response resp := &logical.Response{ Data: map[string]interface{}{ @@ -361,6 +295,15 @@ func (b *backend) formatKeyPolicy(p *keysutil.Policy, context []byte) (*logical. } } + contextRaw := d.Get("context").(string) + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest + } + } + switch p.Type { case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305: retKeys := map[string]int64{} @@ -398,7 +341,7 @@ func (b *backend) formatKeyPolicy(p *keysutil.Policy, context []byte) (*logical. } derived, err := p.GetKey(context, ver, 32) if err != nil { - return nil, fmt.Errorf("failed to derive key to return public component: %w", err) + return nil, fmt.Errorf("failed to derive key to return public component") } pubKey := ed25519.PrivateKey(derived).Public().(ed25519.PublicKey) key.PublicKey = base64.StdEncoding.EncodeToString(pubKey) @@ -415,15 +358,9 @@ func (b *backend) formatKeyPolicy(p *keysutil.Policy, context []byte) (*logical. 
key.Name = "rsa-4096" } - var publicKey crypto.PublicKey - publicKey = v.RSAPublicKey - if !v.IsPrivateKeyMissing() { - publicKey = v.RSAKey.Public() - } - // Encode the RSA public key in PEM format to return over the // API - derBytes, err := x509.MarshalPKIXPublicKey(publicKey) + derBytes, err := x509.MarshalPKIXPublicKey(v.RSAKey.Public()) if err != nil { return nil, fmt.Errorf("error marshaling RSA public key: %w", err) } diff --git a/builtin/logical/transit/path_keys_config.go b/builtin/logical/transit/path_keys_config.go deleted file mode 100644 index 722d39c1e3b5f..0000000000000 --- a/builtin/logical/transit/path_keys_config.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package transit - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/keysutil" - "github.com/hashicorp/vault/sdk/logical" -) - -func (b *backend) pathKeysConfig() *framework.Path { - return &framework.Path{ - Pattern: "keys/" + framework.GenericNameRegex("name") + "/config", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "configure", - OperationSuffix: "key", - }, - - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the key", - }, - - "min_decryption_version": { - Type: framework.TypeInt, - Description: `If set, the minimum version of the key allowed -to be decrypted. For signing keys, the minimum -version allowed to be used for verification.`, - }, - - "min_encryption_version": { - Type: framework.TypeInt, - Description: `If set, the minimum version of the key allowed -to be used for encryption; or for signing keys, -to be used for signing. 
If set to zero, only -the latest version of the key is allowed.`, - }, - - "deletion_allowed": { - Type: framework.TypeBool, - Description: "Whether to allow deletion of the key", - }, - - "exportable": { - Type: framework.TypeBool, - Description: `Enables export of the key. Once set, this cannot be disabled.`, - }, - - "allow_plaintext_backup": { - Type: framework.TypeBool, - Description: `Enables taking a backup of the named key in plaintext format. Once set, this cannot be disabled.`, - }, - - "auto_rotate_period": { - Type: framework.TypeDurationSecond, - Description: `Amount of time the key should live before -being automatically rotated. A value of 0 -disables automatic rotation for the key.`, - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathKeysConfigWrite, - }, - - HelpSynopsis: pathKeysConfigHelpSyn, - HelpDescription: pathKeysConfigHelpDesc, - } -} - -func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (resp *logical.Response, retErr error) { - name := d.Get("name").(string) - - // Check if the policy already exists before we lock everything - p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ - Storage: req.Storage, - Name: name, - }, b.GetRandomReader()) - if err != nil { - return nil, err - } - if p == nil { - return logical.ErrorResponse( - fmt.Sprintf("no existing key named %s could be found", name)), - logical.ErrInvalidRequest - } - if !b.System().CachingDisabled() { - p.Lock(true) - } - defer p.Unlock() - - var warning string - - originalMinDecryptionVersion := p.MinDecryptionVersion - originalMinEncryptionVersion := p.MinEncryptionVersion - originalDeletionAllowed := p.DeletionAllowed - originalExportable := p.Exportable - originalAllowPlaintextBackup := p.AllowPlaintextBackup - - defer func() { - if retErr != nil || (resp != nil && resp.IsError()) { - p.MinDecryptionVersion = originalMinDecryptionVersion - p.MinEncryptionVersion = 
originalMinEncryptionVersion - p.DeletionAllowed = originalDeletionAllowed - p.Exportable = originalExportable - p.AllowPlaintextBackup = originalAllowPlaintextBackup - } - }() - - persistNeeded := false - - minDecryptionVersionRaw, ok := d.GetOk("min_decryption_version") - if ok { - minDecryptionVersion := minDecryptionVersionRaw.(int) - - if minDecryptionVersion < 0 { - return logical.ErrorResponse("min decryption version cannot be negative"), nil - } - - if minDecryptionVersion == 0 { - minDecryptionVersion = 1 - warning = "since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1" - } - - if minDecryptionVersion != p.MinDecryptionVersion { - if minDecryptionVersion > p.LatestVersion { - return logical.ErrorResponse( - fmt.Sprintf("cannot set min decryption version of %d, latest key version is %d", minDecryptionVersion, p.LatestVersion)), nil - } - p.MinDecryptionVersion = minDecryptionVersion - persistNeeded = true - } - } - - minEncryptionVersionRaw, ok := d.GetOk("min_encryption_version") - if ok { - minEncryptionVersion := minEncryptionVersionRaw.(int) - - if minEncryptionVersion < 0 { - return logical.ErrorResponse("min encryption version cannot be negative"), nil - } - - if minEncryptionVersion != p.MinEncryptionVersion { - if minEncryptionVersion > p.LatestVersion { - return logical.ErrorResponse( - fmt.Sprintf("cannot set min encryption version of %d, latest key version is %d", minEncryptionVersion, p.LatestVersion)), nil - } - p.MinEncryptionVersion = minEncryptionVersion - persistNeeded = true - } - } - - // Check here to get the final picture after the logic on each - // individually. MinDecryptionVersion will always be 1 or above. 
- if p.MinEncryptionVersion > 0 && - p.MinEncryptionVersion < p.MinDecryptionVersion { - return logical.ErrorResponse( - fmt.Sprintf("cannot set min encryption/decryption values; min encryption version of %d must be greater than or equal to min decryption version of %d", p.MinEncryptionVersion, p.MinDecryptionVersion)), nil - } - - allowDeletionInt, ok := d.GetOk("deletion_allowed") - if ok { - allowDeletion := allowDeletionInt.(bool) - if allowDeletion != p.DeletionAllowed { - p.DeletionAllowed = allowDeletion - persistNeeded = true - } - } - - // Add this as a guard here before persisting since we now require the min - // decryption version to start at 1; even if it's not explicitly set here, - // force the upgrade - if p.MinDecryptionVersion == 0 { - p.MinDecryptionVersion = 1 - persistNeeded = true - } - - exportableRaw, ok := d.GetOk("exportable") - if ok { - exportable := exportableRaw.(bool) - // Don't unset the already set value - if exportable && !p.Exportable { - p.Exportable = exportable - persistNeeded = true - } - } - - allowPlaintextBackupRaw, ok := d.GetOk("allow_plaintext_backup") - if ok { - allowPlaintextBackup := allowPlaintextBackupRaw.(bool) - // Don't unset the already set value - if allowPlaintextBackup && !p.AllowPlaintextBackup { - p.AllowPlaintextBackup = allowPlaintextBackup - persistNeeded = true - } - } - - autoRotatePeriodRaw, ok, err := d.GetOkErr("auto_rotate_period") - if err != nil { - return nil, err - } - if ok { - autoRotatePeriod := time.Second * time.Duration(autoRotatePeriodRaw.(int)) - // Provided value must be 0 to disable or at least an hour - if autoRotatePeriod != 0 && autoRotatePeriod < time.Hour { - return logical.ErrorResponse("auto rotate period must be 0 to disable or at least an hour"), nil - } - - if autoRotatePeriod != p.AutoRotatePeriod { - p.AutoRotatePeriod = autoRotatePeriod - persistNeeded = true - } - } - - if !persistNeeded { - resp, err := b.formatKeyPolicy(p, nil) - if err != nil { - return nil, err - } 
- if warning != "" { - resp.AddWarning(warning) - } - return resp, nil - } - - switch { - case p.MinAvailableVersion > p.MinEncryptionVersion: - return logical.ErrorResponse("min encryption version should not be less than min available version"), nil - case p.MinAvailableVersion > p.MinDecryptionVersion: - return logical.ErrorResponse("min decryption version should not be less then min available version"), nil - } - - if err := p.Persist(ctx, req.Storage); err != nil { - return nil, err - } - - resp, err = b.formatKeyPolicy(p, nil) - if err != nil { - return nil, err - } - if warning != "" { - resp.AddWarning(warning) - } - return resp, nil -} - -const pathKeysConfigHelpSyn = `Configure a named encryption key` - -const pathKeysConfigHelpDesc = ` -This path is used to configure the named key. Currently, this -supports adjusting the minimum version of the key allowed to -be used for decryption via the min_decryption_version parameter. -` diff --git a/builtin/logical/transit/path_keys_config_test.go b/builtin/logical/transit/path_keys_config_test.go deleted file mode 100644 index 335607c3b0e1d..0000000000000 --- a/builtin/logical/transit/path_keys_config_test.go +++ /dev/null @@ -1,408 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package transit - -import ( - "context" - "encoding/hex" - "encoding/json" - "fmt" - "strconv" - "strings" - "testing" - "time" - - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/api" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" -) - -func TestTransit_ConfigSettings(t *testing.T) { - b, storage := createBackendWithSysView(t) - - doReq := func(req *logical.Request) *logical.Response { - resp, err := b.HandleRequest(context.Background(), req) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req) - } - return resp - } - doErrReq := func(req *logical.Request) { - resp, err := b.HandleRequest(context.Background(), req) - if err == nil { - if resp == nil || !resp.IsError() { - t.Fatalf("expected error; req:\n%#v\n", *req) - } - } - } - - // First create a key - req := &logical.Request{ - Storage: storage, - Operation: logical.UpdateOperation, - Path: "keys/aes256", - Data: map[string]interface{}{ - "derived": true, - }, - } - doReq(req) - - req.Path = "keys/aes128" - req.Data["type"] = "aes128-gcm96" - doReq(req) - - req.Path = "keys/ed" - req.Data["type"] = "ed25519" - doReq(req) - - delete(req.Data, "derived") - - req.Path = "keys/p256" - req.Data["type"] = "ecdsa-p256" - doReq(req) - - req.Path = "keys/p384" - req.Data["type"] = "ecdsa-p384" - doReq(req) - - req.Path = "keys/p521" - req.Data["type"] = "ecdsa-p521" - doReq(req) - - delete(req.Data, "type") - - req.Path = "keys/aes128/rotate" - doReq(req) - doReq(req) - doReq(req) - doReq(req) - - req.Path = "keys/aes256/rotate" - doReq(req) - doReq(req) - doReq(req) - doReq(req) - - req.Path = "keys/ed/rotate" - doReq(req) - doReq(req) - doReq(req) - doReq(req) - - req.Path = "keys/p256/rotate" - doReq(req) - doReq(req) - doReq(req) - doReq(req) - - req.Path = "keys/p384/rotate" - doReq(req) - doReq(req) - doReq(req) - doReq(req) - 
- req.Path = "keys/p521/rotate" - doReq(req) - doReq(req) - doReq(req) - doReq(req) - - req.Path = "keys/aes256/config" - // Too high - req.Data["min_decryption_version"] = 7 - doErrReq(req) - // Too low - req.Data["min_decryption_version"] = -1 - doErrReq(req) - - delete(req.Data, "min_decryption_version") - // Too high - req.Data["min_encryption_version"] = 7 - doErrReq(req) - // Too low - req.Data["min_encryption_version"] = 7 - doErrReq(req) - - // Not allowed, cannot decrypt - req.Data["min_decryption_version"] = 3 - req.Data["min_encryption_version"] = 2 - doErrReq(req) - - // Allowed - req.Data["min_decryption_version"] = 2 - req.Data["min_encryption_version"] = 3 - doReq(req) - req.Path = "keys/aes128/config" - doReq(req) - req.Path = "keys/ed/config" - doReq(req) - req.Path = "keys/p256/config" - doReq(req) - req.Path = "keys/p384/config" - doReq(req) - - req.Path = "keys/p521/config" - doReq(req) - - req.Data = map[string]interface{}{ - "plaintext": "abcd", - "input": "abcd", - "context": "abcd", - } - - maxKeyVersion := 5 - key := "aes256" - - testHMAC := func(ver int, valid bool) { - req.Path = "hmac/" + key - delete(req.Data, "hmac") - if ver == maxKeyVersion { - delete(req.Data, "key_version") - } else { - req.Data["key_version"] = ver - } - - if !valid { - doErrReq(req) - return - } - - resp := doReq(req) - ct := resp.Data["hmac"].(string) - if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { - t.Fatal("wrong hmac version") - } - - req.Path = "verify/" + key - delete(req.Data, "key_version") - req.Data["hmac"] = resp.Data["hmac"] - doReq(req) - } - - testEncryptDecrypt := func(ver int, valid bool) { - req.Path = "encrypt/" + key - delete(req.Data, "ciphertext") - if ver == maxKeyVersion { - delete(req.Data, "key_version") - } else { - req.Data["key_version"] = ver - } - - if !valid { - doErrReq(req) - return - } - - resp := doReq(req) - ct := resp.Data["ciphertext"].(string) - if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { - 
t.Fatal("wrong encryption version") - } - - req.Path = "decrypt/" + key - delete(req.Data, "key_version") - req.Data["ciphertext"] = resp.Data["ciphertext"] - doReq(req) - } - testEncryptDecrypt(5, true) - testEncryptDecrypt(4, true) - testEncryptDecrypt(3, true) - testEncryptDecrypt(2, false) - testHMAC(5, true) - testHMAC(4, true) - testHMAC(3, true) - testHMAC(2, false) - - key = "aes128" - testEncryptDecrypt(5, true) - testEncryptDecrypt(4, true) - testEncryptDecrypt(3, true) - testEncryptDecrypt(2, false) - testHMAC(5, true) - testHMAC(4, true) - testHMAC(3, true) - testHMAC(2, false) - - delete(req.Data, "plaintext") - req.Data["input"] = "abcd" - key = "ed" - testSignVerify := func(ver int, valid bool) { - req.Path = "sign/" + key - delete(req.Data, "signature") - if ver == maxKeyVersion { - delete(req.Data, "key_version") - } else { - req.Data["key_version"] = ver - } - - if !valid { - doErrReq(req) - return - } - - resp := doReq(req) - ct := resp.Data["signature"].(string) - if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { - t.Fatal("wrong signature version") - } - - req.Path = "verify/" + key - delete(req.Data, "key_version") - req.Data["signature"] = resp.Data["signature"] - doReq(req) - } - testSignVerify(5, true) - testSignVerify(4, true) - testSignVerify(3, true) - testSignVerify(2, false) - testHMAC(5, true) - testHMAC(4, true) - testHMAC(3, true) - testHMAC(2, false) - - delete(req.Data, "context") - key = "p256" - testSignVerify(5, true) - testSignVerify(4, true) - testSignVerify(3, true) - testSignVerify(2, false) - testHMAC(5, true) - testHMAC(4, true) - testHMAC(3, true) - testHMAC(2, false) - - key = "p384" - testSignVerify(5, true) - testSignVerify(4, true) - testSignVerify(3, true) - testSignVerify(2, false) - testHMAC(5, true) - testHMAC(4, true) - testHMAC(3, true) - testHMAC(2, false) - - key = "p521" - testSignVerify(5, true) - testSignVerify(4, true) - testSignVerify(3, true) - testSignVerify(2, false) - testHMAC(5, true) - 
testHMAC(4, true) - testHMAC(3, true) - testHMAC(2, false) -} - -func TestTransit_UpdateKeyConfigWithAutorotation(t *testing.T) { - tests := map[string]struct { - initialAutoRotatePeriod interface{} - newAutoRotatePeriod interface{} - shouldError bool - expectedValue time.Duration - }{ - "default (no value)": { - initialAutoRotatePeriod: "5h", - shouldError: false, - expectedValue: 5 * time.Hour, - }, - "0 (int)": { - initialAutoRotatePeriod: "5h", - newAutoRotatePeriod: 0, - shouldError: false, - expectedValue: 0, - }, - "0 (string)": { - initialAutoRotatePeriod: "5h", - newAutoRotatePeriod: 0, - shouldError: false, - expectedValue: 0, - }, - "5 seconds": { - newAutoRotatePeriod: "5s", - shouldError: true, - }, - "5 hours": { - newAutoRotatePeriod: "5h", - shouldError: false, - expectedValue: 5 * time.Hour, - }, - "negative value": { - newAutoRotatePeriod: "-1800s", - shouldError: true, - }, - "invalid string": { - newAutoRotatePeriod: "this shouldn't work", - shouldError: true, - }, - } - - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "transit": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - cores := cluster.Cores - vault.TestWaitActive(t, cores[0].Core) - client := cores[0].Client - err := client.Sys().Mount("transit", &api.MountInput{ - Type: "transit", - }) - if err != nil { - t.Fatal(err) - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - keyNameBytes, err := uuid.GenerateRandomBytes(16) - if err != nil { - t.Fatal(err) - } - keyName := hex.EncodeToString(keyNameBytes) - - _, err = client.Logical().Write(fmt.Sprintf("transit/keys/%s", keyName), map[string]interface{}{ - "auto_rotate_period": test.initialAutoRotatePeriod, - }) - if err != nil { - t.Fatal(err) - } - resp, err := client.Logical().Write(fmt.Sprintf("transit/keys/%s/config", keyName), 
map[string]interface{}{ - "auto_rotate_period": test.newAutoRotatePeriod, - }) - switch { - case test.shouldError && err == nil: - t.Fatal("expected non-nil error") - case !test.shouldError && err != nil: - t.Fatal(err) - } - - if !test.shouldError { - resp, err = client.Logical().Read(fmt.Sprintf("transit/keys/%s", keyName)) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected non-nil response") - } - gotRaw, ok := resp.Data["auto_rotate_period"].(json.Number) - if !ok { - t.Fatal("returned value is of unexpected type") - } - got, err := gotRaw.Int64() - if err != nil { - t.Fatal(err) - } - want := int64(test.expectedValue.Seconds()) - if got != want { - t.Fatalf("incorrect auto_rotate_period returned, got: %d, want: %d", got, want) - } - } - }) - } -} diff --git a/builtin/logical/transit/path_keys_test.go b/builtin/logical/transit/path_keys_test.go index 4b3303988eed5..04c1d8da092de 100644 --- a/builtin/logical/transit/path_keys_test.go +++ b/builtin/logical/transit/path_keys_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit_test import ( diff --git a/builtin/logical/transit/path_random.go b/builtin/logical/transit/path_random.go index 3fc5abef286c3..3b903e0b37fe3 100644 --- a/builtin/logical/transit/path_random.go +++ b/builtin/logical/transit/path_random.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -14,13 +11,6 @@ import ( func (b *backend) pathRandom() *framework.Path { return &framework.Path{ Pattern: "random(/" + framework.GenericNameRegex("source") + ")?" 
+ framework.OptionalParamRegex("urlbytes"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "generate", - OperationSuffix: "random|random-with-source|random-with-bytes|random-with-source-and-bytes", - }, - Fields: map[string]*framework.FieldSchema{ "urlbytes": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_random_test.go b/builtin/logical/transit/path_random_test.go index 35782ec3eadc8..037a00b55f087 100644 --- a/builtin/logical/transit/path_random_test.go +++ b/builtin/logical/transit/path_random_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/path_restore.go b/builtin/logical/transit/path_restore.go index 4df9d69226ed6..fa8c142bbab36 100644 --- a/builtin/logical/transit/path_restore.go +++ b/builtin/logical/transit/path_restore.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -15,13 +12,6 @@ import ( func (b *backend) pathRestore() *framework.Path { return &framework.Path{ Pattern: "restore" + framework.OptionalParamRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "restore", - OperationSuffix: "key|and-rename-key", - }, - Fields: map[string]*framework.FieldSchema{ "backup": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_restore_test.go b/builtin/logical/transit/path_restore_test.go index 3dcc552d98b8a..6e13b985ee65d 100644 --- a/builtin/logical/transit/path_restore_test.go +++ b/builtin/logical/transit/path_restore_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/path_rewrap.go b/builtin/logical/transit/path_rewrap.go index 35d5c68a38882..1bdf282f7f438 100644 --- a/builtin/logical/transit/path_rewrap.go +++ b/builtin/logical/transit/path_rewrap.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -19,12 +16,6 @@ import ( func (b *backend) pathRewrap() *framework.Path { return &framework.Path{ Pattern: "rewrap/" + framework.GenericNameRegex("name"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "rewrap", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -199,10 +190,6 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * resp := &logical.Response{} if batchInputRaw != nil { - // Copy the references - for i := range batchInputItems { - batchResponseItems[i].Reference = batchInputItems[i].Reference - } resp.Data = map[string]interface{}{ "batch_results": batchResponseItems, } diff --git a/builtin/logical/transit/path_rewrap_test.go b/builtin/logical/transit/path_rewrap_test.go index 097626c1c28ae..535133ac5cac4 100644 --- a/builtin/logical/transit/path_rewrap_test.go +++ b/builtin/logical/transit/path_rewrap_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -227,8 +224,8 @@ func TestTransit_BatchRewrapCase3(t *testing.T) { b, s := createBackendWithStorage(t) batchEncryptionInput := []interface{}{ - map[string]interface{}{"plaintext": "dmlzaGFsCg==", "reference": "ek"}, - map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "do"}, + map[string]interface{}{"plaintext": "dmlzaGFsCg=="}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, } batchEncryptionData := map[string]interface{}{ "batch_input": batchEncryptionInput, @@ -248,7 +245,7 @@ func TestTransit_BatchRewrapCase3(t *testing.T) { batchRewrapInput := make([]interface{}, len(batchEncryptionResponseItems)) for i, item := range batchEncryptionResponseItems { - batchRewrapInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext, "reference": item.Reference} + batchRewrapInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext} } batchRewrapData := map[string]interface{}{ @@ -292,11 +289,6 @@ func TestTransit_BatchRewrapCase3(t *testing.T) { for i, eItem := range batchEncryptionResponseItems { rItem := batchRewrapResponseItems[i] - inputRef := batchEncryptionInput[i].(map[string]interface{})["reference"] - if eItem.Reference != inputRef { - t.Fatalf("bad: reference mismatch. Expected %s, Actual: %s", inputRef, eItem.Reference) - } - if eItem.Ciphertext == rItem.Ciphertext { t.Fatalf("bad: rewrap input and output are the same") } @@ -323,6 +315,5 @@ func TestTransit_BatchRewrapCase3(t *testing.T) { if resp.Data["plaintext"] != plaintext1 && resp.Data["plaintext"] != plaintext2 { t.Fatalf("bad: plaintext. 
Expected: %q or %q, Actual: %q", plaintext1, plaintext2, resp.Data["plaintext"]) } - } } diff --git a/builtin/logical/transit/path_rotate.go b/builtin/logical/transit/path_rotate.go index 0035dcfbb9768..a74e69980512e 100644 --- a/builtin/logical/transit/path_rotate.go +++ b/builtin/logical/transit/path_rotate.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -14,26 +11,11 @@ import ( func (b *backend) pathRotate() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/rotate", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "rotate", - OperationSuffix: "key", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, Description: "Name of the key", }, - "managed_key_name": { - Type: framework.TypeString, - Description: "The name of the managed key to use for the new version of this transit key", - }, - "managed_key_id": { - Type: framework.TypeString, - Description: "The UUID of the managed key to use for the new version of this transit key", - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -47,8 +29,6 @@ func (b *backend) pathRotate() *framework.Path { func (b *backend) pathRotateWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - managedKeyName := d.Get("managed_key_name").(string) - managedKeyId := d.Get("managed_key_id").(string) // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ @@ -64,26 +44,12 @@ func (b *backend) pathRotateWrite(ctx context.Context, req *logical.Request, d * if !b.System().CachingDisabled() { p.Lock(true) } - defer p.Unlock() - - if p.Type == keysutil.KeyType_MANAGED_KEY { - var keyId string - keyId, err = GetManagedKeyUUID(ctx, b, managedKeyName, managedKeyId) - if err != nil { - p.Unlock() - return nil, err - } - err 
= p.RotateManagedKey(ctx, req.Storage, keyId) - } else { - // Rotate the policy - err = p.Rotate(ctx, req.Storage, b.GetRandomReader()) - } - if err != nil { - return nil, err - } + // Rotate the policy + err = p.Rotate(ctx, req.Storage, b.GetRandomReader()) - return b.formatKeyPolicy(p, nil) + p.Unlock() + return nil, err } const pathRotateHelpSyn = `Rotate named encryption key` diff --git a/builtin/logical/transit/path_sign_verify.go b/builtin/logical/transit/path_sign_verify.go index 98ced3812ab9f..b56ef192fe102 100644 --- a/builtin/logical/transit/path_sign_verify.go +++ b/builtin/logical/transit/path_sign_verify.go @@ -1,13 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( "context" "crypto/rsa" "encoding/base64" - "errors" "fmt" "strconv" "strings" @@ -29,12 +25,12 @@ type batchResponseSignItem struct { // request item Signature string `json:"signature,omitempty" mapstructure:"signature"` - // The key version to be used for signing + // The key version to be used for encryption KeyVersion int `json:"key_version" mapstructure:"key_version"` PublicKey []byte `json:"publickey,omitempty" mapstructure:"publickey"` - // Error, if set represents a failure encountered while signing a + // Error, if set represents a failure encountered while encrypting a // corresponding batch request item Error string `json:"error,omitempty" mapstructure:"error"` @@ -43,10 +39,6 @@ type batchResponseSignItem struct { // For batch processing to successfully mimic previous handling for simple 'input', // both output values are needed - though 'err' should never be serialized. err error - - // Reference is an arbitrary caller supplied string value that will be placed on the - // batch response to ease correlation between inputs and outputs - Reference string `json:"reference" mapstructure:"reference"` } // BatchRequestVerifyItem represents a request item for batch processing. 
@@ -58,7 +50,7 @@ type batchResponseVerifyItem struct { // Valid indicates whether signature matches the signature derived from the input string Valid bool `json:"valid" mapstructure:"valid"` - // Error, if set represents a failure encountered while verifying a + // Error, if set represents a failure encountered while encrypting a // corresponding batch request item Error string `json:"error,omitempty" mapstructure:"error"` @@ -67,24 +59,11 @@ type batchResponseVerifyItem struct { // For batch processing to successfully mimic previous handling for simple 'input', // both output values are needed - though 'err' should never be serialized. err error - - // Reference is an arbitrary caller supplied string value that will be placed on the - // batch response to ease correlation between inputs and outputs - Reference string `json:"reference" mapstructure:"reference"` } -const defaultHashAlgorithm = "sha2-256" - func (b *backend) pathSign() *framework.Path { return &framework.Path{ Pattern: "sign/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "sign", - OperationSuffix: "|with-algorithm", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -104,7 +83,7 @@ derivation is enabled; currently only available with ed25519 keys.`, "hash_algorithm": { Type: framework.TypeString, - Default: defaultHashAlgorithm, + Default: "sha2-256", Description: `Hash algorithm to use (POST body parameter). Valid values are: * sha1 @@ -116,17 +95,14 @@ derivation is enabled; currently only available with ed25519 keys.`, * sha3-256 * sha3-384 * sha3-512 -* none Defaults to "sha2-256". Not valid for all key types, -including ed25519. 
Using none requires setting prehashed=true and -signature_algorithm=pkcs1v15, yielding a PKCSv1_5_NoOID instead of -the usual PKCSv1_5_DERnull signature.`, +including ed25519.`, }, "algorithm": { Type: framework.TypeString, - Default: defaultHashAlgorithm, + Default: "sha2-256", Description: `Deprecated: use "hash_algorithm" instead.`, }, @@ -187,13 +163,6 @@ preserve the order of the batch input`, func (b *backend) pathVerify() *framework.Path { return &framework.Path{ Pattern: "verify/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "verify", - OperationSuffix: "|with-algorithm", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -228,7 +197,7 @@ derivation is enabled; currently only available with ed25519 keys.`, "hash_algorithm": { Type: framework.TypeString, - Default: defaultHashAlgorithm, + Default: "sha2-256", Description: `Hash algorithm to use (POST body parameter). Valid values are: * sha1 @@ -240,15 +209,13 @@ derivation is enabled; currently only available with ed25519 keys.`, * sha3-256 * sha3-384 * sha3-512 -* none -Defaults to "sha2-256". Not valid for all key types. See note about -none on signing path.`, +Defaults to "sha2-256". Not valid for all key types.`, }, "algorithm": { Type: framework.TypeString, - Default: defaultHashAlgorithm, + Default: "sha2-256", Description: `Deprecated: use "hash_algorithm" instead.`, }, @@ -259,7 +226,7 @@ none on signing path.`, "signature_algorithm": { Type: framework.TypeString, - Description: `The signature algorithm to use for signature verification. Currently only applies to RSA key types. + Description: `The signature algorithm to use for signature verification. Currently only applies to RSA key types. Options are 'pss' or 'pkcs1v15'. 
Defaults to 'pss'`, }, @@ -329,9 +296,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr hashAlgorithmStr = d.Get("hash_algorithm").(string) if hashAlgorithmStr == "" { hashAlgorithmStr = d.Get("algorithm").(string) - if hashAlgorithmStr == "" { - hashAlgorithmStr = defaultHashAlgorithm - } } } @@ -353,10 +317,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - if hashAlgorithm == keysutil.HashTypeNone && (!prehashed || sigAlgorithm != "pkcs1v15") { - return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest - } - // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ Storage: req.Storage, @@ -366,7 +326,7 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr return nil, err } if p == nil { - return logical.ErrorResponse("signing key not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } if !b.System().CachingDisabled() { p.Lock(false) @@ -434,26 +394,11 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } } - var managedKeyParameters keysutil.ManagedKeyParameters - if p.Type == keysutil.KeyType_MANAGED_KEY { - managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) - if !ok { - return nil, errors.New("unsupported system view") - } - - managedKeyParameters = keysutil.ManagedKeyParameters{ - ManagedKeySystemView: managedKeySystemView, - BackendUUID: b.backendUUID, - Context: ctx, - } - } - sig, err := p.SignWithOptions(ver, context, input, &keysutil.SigningOptions{ - HashAlgorithm: hashAlgorithm, - Marshaling: marshaling, - SaltLength: saltLength, - SigAlgorithm: sigAlgorithm, - ManagedKeyParams: managedKeyParameters, + HashAlgorithm: hashAlgorithm, + Marshaling: marshaling, + 
SaltLength: saltLength, + SigAlgorithm: sigAlgorithm, }) if err != nil { if batchInputRaw != nil { @@ -477,10 +422,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr // Generate the response resp := &logical.Response{} if batchInputRaw != nil { - // Copy the references - for i := range batchInputItems { - response[i].Reference = batchInputItems[i]["reference"] - } resp.Data = map[string]interface{}{ "batch_results": response, } @@ -582,9 +523,6 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * hashAlgorithmStr = d.Get("hash_algorithm").(string) if hashAlgorithmStr == "" { hashAlgorithmStr = d.Get("algorithm").(string) - if hashAlgorithmStr == "" { - hashAlgorithmStr = defaultHashAlgorithm - } } } @@ -606,10 +544,6 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - if hashAlgorithm == keysutil.HashTypeNone && (!prehashed || sigAlgorithm != "pkcs1v15") { - return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest - } - // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ Storage: req.Storage, @@ -619,7 +553,7 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * return nil, err } if p == nil { - return logical.ErrorResponse("signature verification key not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } if !b.System().CachingDisabled() { p.Lock(false) @@ -671,29 +605,13 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * continue } } - var managedKeyParameters keysutil.ManagedKeyParameters - if p.Type == keysutil.KeyType_MANAGED_KEY { - managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) - if !ok { - return nil, errors.New("unsupported system 
view") - } - managedKeyParameters = keysutil.ManagedKeyParameters{ - ManagedKeySystemView: managedKeySystemView, - BackendUUID: b.backendUUID, - Context: ctx, - } - } - - signingOptions := &keysutil.SigningOptions{ - HashAlgorithm: hashAlgorithm, - Marshaling: marshaling, - SaltLength: saltLength, - SigAlgorithm: sigAlgorithm, - ManagedKeyParams: managedKeyParameters, - } - - valid, err := p.VerifySignatureWithOptions(context, input, sig, signingOptions) + valid, err := p.VerifySignatureWithOptions(context, input, sig, &keysutil.SigningOptions{ + HashAlgorithm: hashAlgorithm, + Marshaling: marshaling, + SaltLength: saltLength, + SigAlgorithm: sigAlgorithm, + }) if err != nil { switch err.(type) { case errutil.UserError: @@ -713,10 +631,6 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * // Generate the response resp := &logical.Response{} if batchInputRaw != nil { - // Copy the references - for i := range batchInputItems { - response[i].Reference = batchInputItems[i]["reference"] - } resp.Data = map[string]interface{}{ "batch_results": response, } diff --git a/builtin/logical/transit/path_sign_verify_test.go b/builtin/logical/transit/path_sign_verify_test.go index 63aef9c80b6f1..fa411b0a2c8c2 100644 --- a/builtin/logical/transit/path_sign_verify_test.go +++ b/builtin/logical/transit/path_sign_verify_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -28,7 +25,6 @@ type signOutcome struct { requestOk bool valid bool keyValid bool - reference string } func TestTransit_SignVerify_ECDSA(t *testing.T) { @@ -158,7 +154,7 @@ func testTransit_SignVerify_ECDSA(t *testing.T, bits int) { req.Path = "sign/foo" + postpath resp, err := b.HandleRequest(context.Background(), req) if err != nil && !errExpected { - t.Fatalf("request: %v\nerror: %v", req, err) + t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") @@ -487,7 +483,6 @@ func TestTransit_SignVerify_ED25519(t *testing.T) { } for i, v := range sig { batchRequestItems[i]["signature"] = v - batchRequestItems[i]["reference"] = outcome[i].reference } } else if attachSig { req.Data["signature"] = sig[0] @@ -540,9 +535,6 @@ func TestTransit_SignVerify_ED25519(t *testing.T) { if pubKeyRaw, ok := req.Data["public_key"]; ok { validatePublicKey(t, batchRequestItems[i]["input"], sig[i], pubKeyRaw.([]byte), outcome[i].keyValid, postpath, b) } - if v.Reference != outcome[i].reference { - t.Fatalf("verification failed, mismatched references %s vs %s", v.Reference, outcome[i].reference) - } } return } @@ -642,18 +634,15 @@ func TestTransit_SignVerify_ED25519(t *testing.T) { // Test Batch Signing batchInput := []batchRequestSignItem{ - {"context": "abcd", "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "uno"}, - {"context": "efgh", "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "dos"}, + {"context": "abcd", "input": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + {"context": "efgh", "input": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, } req.Data = map[string]interface{}{ "batch_input": batchInput, } - outcome = []signOutcome{ - {requestOk: true, valid: true, keyValid: true, reference: "uno"}, - {requestOk: true, valid: true, keyValid: true, reference: "dos"}, - } + outcome = []signOutcome{{requestOk: true, valid: true, keyValid: true}, {requestOk: true, valid: true, keyValid: true}} sig = signRequest(req, false, 
"foo") verifyRequest(req, false, outcome, "foo", sig, true) @@ -961,9 +950,6 @@ func testTransit_SignVerify_RSA_PSS(t *testing.T, bits int) { for hashAlgorithm := range keysutil.HashTypeMap { t.Log("Hash algorithm:", hashAlgorithm) - if hashAlgorithm == "none" { - continue - } for marshalingName := range keysutil.MarshalingTypeMap { t.Log("\t", "Marshaling type:", marshalingName) diff --git a/builtin/logical/transit/path_trim.go b/builtin/logical/transit/path_trim.go index 71f4181db859a..60d6ef9dda6d8 100644 --- a/builtin/logical/transit/path_trim.go +++ b/builtin/logical/transit/path_trim.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -14,13 +11,6 @@ import ( func (b *backend) pathTrim() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/trim", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationVerb: "trim", - OperationSuffix: "key", - }, - Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -100,7 +90,7 @@ func (b *backend) pathTrimUpdate() framework.OperationFunc { return nil, err } - return b.formatKeyPolicy(p, nil) + return nil, nil } } diff --git a/builtin/logical/transit/path_trim_test.go b/builtin/logical/transit/path_trim_test.go index b63d644cba8ce..db38aad938ad0 100644 --- a/builtin/logical/transit/path_trim_test.go +++ b/builtin/logical/transit/path_trim_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/path_wrapping_key.go b/builtin/logical/transit/path_wrapping_key.go index f27a32ade5045..1a08318db339d 100644 --- a/builtin/logical/transit/path_wrapping_key.go +++ b/builtin/logical/transit/path_wrapping_key.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -20,10 +17,6 @@ const WrappingKeyName = "wrapping-key" func (b *backend) pathWrappingKey() *framework.Path { return &framework.Path{ Pattern: "wrapping_key", - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixTransit, - OperationSuffix: "wrapping-key", - }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathWrappingKeyRead, }, diff --git a/builtin/logical/transit/path_wrapping_key_test.go b/builtin/logical/transit/path_wrapping_key_test.go index 468c3f4de7ee2..da90585a4ac99 100644 --- a/builtin/logical/transit/path_wrapping_key_test.go +++ b/builtin/logical/transit/path_wrapping_key_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( diff --git a/builtin/logical/transit/stepwise_test.go b/builtin/logical/transit/stepwise_test.go index 2b40cea213226..b64aca9861e99 100644 --- a/builtin/logical/transit/stepwise_test.go +++ b/builtin/logical/transit/stepwise_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package transit import ( @@ -21,7 +18,7 @@ func TestAccBackend_basic_docker(t *testing.T) { decryptData := make(map[string]interface{}) envOptions := stepwise.MountOptions{ RegistryName: "updatedtransit", - PluginType: api.PluginTypeSecrets, + PluginType: stepwise.PluginTypeSecrets, PluginName: "transit", MountPathPrefix: "transit_temp", } diff --git a/builtin/plugin/backend.go b/builtin/plugin/backend.go index 04606bcbd2883..b165a10c1ec11 100644 --- a/builtin/plugin/backend.go +++ b/builtin/plugin/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package plugin import ( @@ -86,15 +83,23 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, runningVersion = versioner.PluginVersion().Version } + external := false + if externaler, ok := raw.(logical.Externaler); ok { + external = externaler.IsExternal() + } + // Cleanup meta plugin backend raw.Cleanup(ctx) // Initialize b.Backend with placeholder backend since plugin // backends will need to be lazy loaded. - b.Backend = &framework.Backend{ - PathsSpecial: paths, - BackendType: btype, - RunningVersion: runningVersion, + b.Backend = &placeholderBackend{ + Backend: framework.Backend{ + PathsSpecial: paths, + BackendType: btype, + RunningVersion: runningVersion, + }, + external: external, } b.config = conf @@ -102,6 +107,23 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, return &b, nil } +// placeholderBackend is used a placeholder before a backend is lazy-loaded. +// It is mostly used to mark that the backend is an external backend. 
+type placeholderBackend struct { + framework.Backend + + external bool +} + +func (p *placeholderBackend) IsExternal() bool { + return p.external +} + +var ( + _ logical.Externaler = (*placeholderBackend)(nil) + _ logical.PluginVersioner = (*placeholderBackend)(nil) +) + // PluginBackend is a thin wrapper around plugin.BackendPluginClient type PluginBackend struct { Backend logical.Backend @@ -301,4 +323,14 @@ func (b *PluginBackend) PluginVersion() logical.PluginVersion { return logical.EmptyPluginVersion } -var _ logical.PluginVersioner = (*PluginBackend)(nil) +func (b *PluginBackend) IsExternal() bool { + if externaler, ok := b.Backend.(logical.Externaler); ok { + return externaler.IsExternal() + } + return false +} + +var ( + _ logical.PluginVersioner = (*PluginBackend)(nil) + _ logical.Externaler = (*PluginBackend)(nil) +) diff --git a/builtin/plugin/backend_lazyLoad_test.go b/builtin/plugin/backend_lazyLoad_test.go index b2f6303ba6ac9..4d2727037adca 100644 --- a/builtin/plugin/backend_lazyLoad_test.go +++ b/builtin/plugin/backend_lazyLoad_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/builtin/plugin/backend_test.go b/builtin/plugin/backend_test.go index 28dd1e348331e..d7a678ba1f6b6 100644 --- a/builtin/plugin/backend_test.go +++ b/builtin/plugin/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin_test import ( diff --git a/builtin/plugin/mock_plugin_test.go b/builtin/plugin/mock_plugin_test.go index 9279c828f1da3..532b7c763286d 100644 --- a/builtin/plugin/mock_plugin_test.go +++ b/builtin/plugin/mock_plugin_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/builtin/plugin/v5/backend.go b/builtin/plugin/v5/backend.go index eac311b4ad60d..3f7a9a884ce77 100644 --- a/builtin/plugin/v5/backend.go +++ b/builtin/plugin/v5/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/changelog/10299.txt b/changelog/10299.txt deleted file mode 100644 index db135b64e7945..0000000000000 --- a/changelog/10299.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Add algorithm-signer as a SSH Secrets Engine UI field -``` diff --git a/changelog/14229.txt b/changelog/14229.txt deleted file mode 100644 index 5fa7a2d2301ae..0000000000000 --- a/changelog/14229.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:change -core: Vault version has been moved out of sdk and into main vault module. -Plugins using sdk/useragent.String must instead use sdk/useragent.PluginString. -``` diff --git a/changelog/14945.txt b/changelog/14945.txt deleted file mode 100644 index 50dea7159f4ec..0000000000000 --- a/changelog/14945.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies. -``` diff --git a/changelog/15869.txt b/changelog/15869.txt deleted file mode 100644 index bb0278dccf98a..0000000000000 --- a/changelog/15869.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls -``` \ No newline at end of file diff --git a/changelog/16224.txt b/changelog/16224.txt deleted file mode 100644 index 822b24504df59..0000000000000 --- a/changelog/16224.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: fix GPG encryption to support subkeys. 
-``` diff --git a/changelog/16622.txt b/changelog/16622.txt deleted file mode 100644 index 37ae5abb5a884..0000000000000 --- a/changelog/16622.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/ssh: Evaluate ssh validprincipals user template before splitting -``` diff --git a/changelog/16688.txt b/changelog/16688.txt index c7310fd3127d6..ce192d5f220f8 100644 --- a/changelog/16688.txt +++ b/changelog/16688.txt @@ -4,3 +4,6 @@ plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed ```release-note:change plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. ``` +```release-note:improvement +plugins: Plugin catalog supports registering and managing plugins with semantic version information. +``` diff --git a/changelog/16700.txt b/changelog/16700.txt deleted file mode 100644 index 9dc8d1e2d199e..0000000000000 --- a/changelog/16700.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. -``` diff --git a/changelog/16872.txt b/changelog/16872.txt deleted file mode 100644 index 5bbad14067bd5..0000000000000 --- a/changelog/16872.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: fix incorrectly used loop variables in parallel tests and when finalizing seals -``` diff --git a/changelog/16982.txt b/changelog/16982.txt deleted file mode 100644 index 70d2521a54b8f..0000000000000 --- a/changelog/16982.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. 
-``` \ No newline at end of file diff --git a/changelog/17086.txt b/changelog/17086.txt deleted file mode 100644 index ce221ad32c925..0000000000000 --- a/changelog/17086.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -ui: Upgrade Ember to version 4.4.0 -``` \ No newline at end of file diff --git a/changelog/17093.txt b/changelog/17093.txt deleted file mode 100644 index a51f3de8ff4be..0000000000000 --- a/changelog/17093.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/cert: Add configurable support for validating client certs with OCSP. -``` \ No newline at end of file diff --git a/changelog/17104.txt b/changelog/17104.txt deleted file mode 100644 index 00c5eebf39d85..0000000000000 --- a/changelog/17104.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:change -auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existent users. -This will only be used internally for implementing user lockout. -``` \ No newline at end of file diff --git a/changelog/17167.txt b/changelog/17167.txt deleted file mode 100644 index 89b8905490632..0000000000000 --- a/changelog/17167.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:change -plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. -``` -```release-note:change -plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. 
-``` \ No newline at end of file diff --git a/changelog/17186.txt b/changelog/17186.txt deleted file mode 100644 index d086dd0ef0aab..0000000000000 --- a/changelog/17186.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: fix race when using SystemView.ReplicationState outside of a request context -``` \ No newline at end of file diff --git a/changelog/17187.txt b/changelog/17187.txt deleted file mode 100644 index 71476ef3169e6..0000000000000 --- a/changelog/17187.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: Refactor lock grabbing code to simplify stateLock deadlock investigations -``` \ No newline at end of file diff --git a/changelog/17265.txt b/changelog/17265.txt deleted file mode 100644 index fe2d3b9a00aa7..0000000000000 --- a/changelog/17265.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. -``` diff --git a/changelog/17289.txt b/changelog/17289.txt deleted file mode 100644 index e8df6cab99ab9..0000000000000 --- a/changelog/17289.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. 
-``` \ No newline at end of file diff --git a/changelog/17308.txt b/changelog/17308.txt deleted file mode 100644 index 5a2bf8bace9a3..0000000000000 --- a/changelog/17308.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement: -cli: User-requested -help text now appears on stdout for paging rather than stderr -``` diff --git a/changelog/17338.txt b/changelog/17338.txt deleted file mode 100644 index 00b537bffb9dc..0000000000000 --- a/changelog/17338.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Add user lockout field to config and configuring this for auth mount using auth tune to prevent brute forcing in auth methods -``` \ No newline at end of file diff --git a/changelog/17339.txt b/changelog/17339.txt deleted file mode 100644 index 4ab14210ecb41..0000000000000 --- a/changelog/17339.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. -``` diff --git a/changelog/17376.txt b/changelog/17376.txt deleted file mode 100644 index 0b2f4e40dd4c9..0000000000000 --- a/changelog/17376.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: Remove default value of 30 to TtlPicker2 if no value is passed in. 
-``` diff --git a/changelog/17395.txt b/changelog/17395.txt deleted file mode 100644 index f7ce602949af6..0000000000000 --- a/changelog/17395.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug: -api: Fix to account for older versions of Vault with no `custom_metadata` support -``` diff --git a/changelog/17459.txt b/changelog/17459.txt deleted file mode 100644 index fd240c5376758..0000000000000 --- a/changelog/17459.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core/identity: Add machine-readable output to body of response upon alias clash during entity merge -``` \ No newline at end of file diff --git a/changelog/17499.txt b/changelog/17499.txt deleted file mode 100644 index f3b1831d62774..0000000000000 --- a/changelog/17499.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Update protoc from 3.21.5 to 3.21.7 -``` \ No newline at end of file diff --git a/changelog/17514.txt b/changelog/17514.txt deleted file mode 100644 index 215ea83295d07..0000000000000 --- a/changelog/17514.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: Fix vault operator init command to show the right curl string with -output-curl-string and right policy hcl with -output-policy -``` \ No newline at end of file diff --git a/changelog/17540.txt b/changelog/17540.txt deleted file mode 100644 index 3915eae2af6cd..0000000000000 --- a/changelog/17540.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a -Virtual Machine Scale Set (VMSS) in flexible orchestration mode. 
-``` \ No newline at end of file diff --git a/changelog/17575.txt b/changelog/17575.txt deleted file mode 100644 index f08b53ff851cd..0000000000000 --- a/changelog/17575.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint -``` diff --git a/changelog/17636.txt b/changelog/17636.txt deleted file mode 100644 index 0668f21874511..0000000000000 --- a/changelog/17636.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures -``` diff --git a/changelog/17638.txt b/changelog/17638.txt deleted file mode 100644 index 37e0575397194..0000000000000 --- a/changelog/17638.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers -``` diff --git a/changelog/17650.txt b/changelog/17650.txt deleted file mode 100644 index 79e76692bb4f0..0000000000000 --- a/changelog/17650.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. 
-``` diff --git a/changelog/17678.txt b/changelog/17678.txt deleted file mode 100644 index bddf213d565f2..0000000000000 --- a/changelog/17678.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -Reduced binary size -``` diff --git a/changelog/17747.txt b/changelog/17747.txt deleted file mode 100644 index 53ceafa807fbd..0000000000000 --- a/changelog/17747.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] -``` diff --git a/changelog/17749.txt b/changelog/17749.txt deleted file mode 100644 index aac49467c40ef..0000000000000 --- a/changelog/17749.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Add inline policy creation when creating an identity entity or group -``` diff --git a/changelog/17750.txt b/changelog/17750.txt deleted file mode 100644 index af12458f45a8a..0000000000000 --- a/changelog/17750.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli/pki: Add health-check subcommand to evaluate the health of a PKI instance. 
-``` diff --git a/changelog/17752.txt b/changelog/17752.txt deleted file mode 100644 index 628eed8e68318..0000000000000 --- a/changelog/17752.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -openapi: fix gen_openapi.sh script to correctly load vault plugins -``` diff --git a/changelog/17768.txt b/changelog/17768.txt deleted file mode 100644 index 25246ea08ea4d..0000000000000 --- a/changelog/17768.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -auth/approle: Add maximum length of 4096 for approle role_names, as this value results in HMAC calculation -``` \ No newline at end of file diff --git a/changelog/17774.txt b/changelog/17774.txt deleted file mode 100644 index f6103ec60a369..0000000000000 --- a/changelog/17774.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id to existing certificate serial lookup api if it is revoked -``` diff --git a/changelog/17779.txt b/changelog/17779.txt deleted file mode 100644 index a0b3bd6c373ba..0000000000000 --- a/changelog/17779.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster -``` diff --git a/changelog/17789.txt b/changelog/17789.txt deleted file mode 100644 index fd6f3b0883c1e..0000000000000 --- a/changelog/17789.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -sys/internal/inspect: Creates an endpoint to look to inspect internal subsystems. 
-``` \ No newline at end of file diff --git a/changelog/17822.txt b/changelog/17822.txt deleted file mode 100644 index 5c39b3c531328..0000000000000 --- a/changelog/17822.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), should use 'VAULT_LOG_FORMAT' instead -``` diff --git a/changelog/17823.txt b/changelog/17823.txt deleted file mode 100644 index d999c879450ea..0000000000000 --- a/changelog/17823.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Allow tidying of expired issuer certificates. -``` diff --git a/changelog/17835.txt b/changelog/17835.txt deleted file mode 100644 index 3c48884364c5a..0000000000000 --- a/changelog/17835.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: mfa: use proper request id generation -``` diff --git a/changelog/17836.txt b/changelog/17836.txt deleted file mode 100644 index 66bc2369e07c5..0000000000000 --- a/changelog/17836.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: trying to unseal with the wrong key now returns HTTP 400 -``` \ No newline at end of file diff --git a/changelog/17841.txt b/changelog/17841.txt deleted file mode 100644 index 6234bbba0b8c8..0000000000000 --- a/changelog/17841.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config -``` \ No newline at end of file diff --git a/changelog/17855.txt b/changelog/17855.txt deleted file mode 100644 index c73838ae3642b..0000000000000 --- a/changelog/17855.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used. 
-``` \ No newline at end of file diff --git a/changelog/17856.txt b/changelog/17856.txt deleted file mode 100644 index f039794857ab5..0000000000000 --- a/changelog/17856.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed -``` diff --git a/changelog/17857.txt b/changelog/17857.txt deleted file mode 100644 index 7c07f4e3d4301..0000000000000 --- a/changelog/17857.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/azure: upgrades dependencies -``` \ No newline at end of file diff --git a/changelog/17858.txt b/changelog/17858.txt deleted file mode 100644 index 2b1a76e34c61c..0000000000000 --- a/changelog/17858.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/gcp: Upgrades dependencies -``` \ No newline at end of file diff --git a/changelog/17866.txt b/changelog/17866.txt deleted file mode 100644 index 7e173f8de7375..0000000000000 --- a/changelog/17866.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: consolidate all tag usage -``` \ No newline at end of file diff --git a/changelog/17871.txt b/changelog/17871.txt deleted file mode 100644 index b4fb2b0ead3fa..0000000000000 --- a/changelog/17871.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/gcp: Upgrades dependencies -``` \ No newline at end of file diff --git a/changelog/17893.txt b/changelog/17893.txt deleted file mode 100644 index 3dddc7659b8d8..0000000000000 --- a/changelog/17893.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. 
-``` \ No newline at end of file diff --git a/changelog/17894.txt b/changelog/17894.txt deleted file mode 100644 index bd056cdf34e65..0000000000000 --- a/changelog/17894.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: allow selection of "default" for ssh algorithm_signer in web interface -``` diff --git a/changelog/17904.txt b/changelog/17904.txt deleted file mode 100644 index aa654046b89cd..0000000000000 --- a/changelog/17904.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -credential/cert: adds error message if no tls connection is found during the AliasLookahead operation -``` \ No newline at end of file diff --git a/changelog/17909.txt b/changelog/17909.txt deleted file mode 100644 index 3f19f94398ee3..0000000000000 --- a/changelog/17909.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Mark request body objects as required -``` diff --git a/changelog/17919.txt b/changelog/17919.txt deleted file mode 100644 index 8fbb41db44a3b..0000000000000 --- a/changelog/17919.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -api: property based testing for LifetimeWatcher sleep duration calculation -``` diff --git a/changelog/17927.txt b/changelog/17927.txt deleted file mode 100644 index 946c5355e0239..0000000000000 --- a/changelog/17927.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Enable typescript for future development -``` \ No newline at end of file diff --git a/changelog/17929.txt b/changelog/17929.txt deleted file mode 100644 index 72b639c18d1c7..0000000000000 --- a/changelog/17929.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -core/server: Added an environment variable to write goroutine stacktraces to a -temporary file for SIGUSR2 signals. 
-``` diff --git a/changelog/17932.txt b/changelog/17932.txt deleted file mode 100644 index 09dd01c1b775e..0000000000000 --- a/changelog/17932.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug: -UI: Fix "MFA-Setup permission denied error" by using user-token specific MFA generate endpoint instead of admin-generate -``` diff --git a/changelog/17934.txt b/changelog/17934.txt deleted file mode 100644 index 7f087a915a285..0000000000000 --- a/changelog/17934.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/transit: Add support to import public keys in transit engine and allow encryption and verification of signed data -``` diff --git a/changelog/17935.txt b/changelog/17935.txt deleted file mode 100644 index c16ffdfe332eb..0000000000000 --- a/changelog/17935.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core/activity: return partial month counts when querying a historical date range and no historical data exists. -``` diff --git a/changelog/17951.txt b/changelog/17951.txt deleted file mode 100644 index 06dd7a4a5d341..0000000000000 --- a/changelog/17951.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: added changes for user lockout workflow. 
-``` \ No newline at end of file diff --git a/changelog/17964.txt b/changelog/17964.txt deleted file mode 100644 index 605100f8148c9..0000000000000 --- a/changelog/17964.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/azure: upgrades dependencies -``` diff --git a/changelog/18021.txt b/changelog/18021.txt deleted file mode 100644 index 69a6df1a6ab72..0000000000000 --- a/changelog/18021.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/alicloud: upgrades dependencies -``` diff --git a/changelog/18031.txt b/changelog/18031.txt deleted file mode 100644 index 74d659fb13e9c..0000000000000 --- a/changelog/18031.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -logging: Vault agent and server commands support log file and log rotation. -``` \ No newline at end of file diff --git a/changelog/18039.txt b/changelog/18039.txt deleted file mode 100644 index ea522a7539b22..0000000000000 --- a/changelog/18039.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:improvement -plugins: Mark logical database plugins Removed and remove the plugin code. -``` -```release-note:improvement -plugins: Mark app-id auth method Removed and remove the plugin code. -``` diff --git a/changelog/18040.txt b/changelog/18040.txt deleted file mode 100644 index cb4eaf81161d7..0000000000000 --- a/changelog/18040.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters -``` diff --git a/changelog/18043.txt b/changelog/18043.txt deleted file mode 100644 index 564574c7f2edf..0000000000000 --- a/changelog/18043.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/cert: Support listing provisioned CRLs within the mount. 
-``` diff --git a/changelog/18067.txt b/changelog/18067.txt deleted file mode 100644 index 2c86305d96d55..0000000000000 --- a/changelog/18067.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli/kv: improve kv CLI to remove data or custom metadata using kv patch -``` diff --git a/changelog/18086.txt b/changelog/18086.txt new file mode 100644 index 0000000000000..7d8d879503255 --- /dev/null +++ b/changelog/18086.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/azure: add WAL to clean up role assignments if errors occur +``` diff --git a/changelog/18101.txt b/changelog/18101.txt deleted file mode 100644 index 97ece748d659f..0000000000000 --- a/changelog/18101.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: Agent listeners can now be to be the `metrics_only` role, serving only metrics, as part of the listener's new top level `role` option. -``` diff --git a/changelog/18111.txt b/changelog/18111.txt new file mode 100644 index 0000000000000..bc3a0e5cd66fb --- /dev/null +++ b/changelog/18111.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary +``` \ No newline at end of file diff --git a/changelog/18128.txt b/changelog/18128.txt deleted file mode 100644 index 32dc537664718..0000000000000 --- a/changelog/18128.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries -``` diff --git a/changelog/18137.txt b/changelog/18137.txt deleted file mode 100644 index f262f96f35212..0000000000000 --- a/changelog/18137.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: Configured Vault Agent listeners now listen without the need for caching to be configured. 
-``` diff --git a/changelog/18186.txt b/changelog/18186.txt deleted file mode 100644 index 13710826284e8..0000000000000 --- a/changelog/18186.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:breaking-change -secrets/pki: Maintaining running count of certificates will be turned off by default. -To re-enable keeping these metrics available on the tidy status endpoint, enable -maintain_stored_certificate_counts on tidy-config, to also publish them to the -metrics consumer, enable publish_stored_certificate_count_metrics . -``` \ No newline at end of file diff --git a/changelog/18192.txt b/changelog/18192.txt deleted file mode 100644 index 56e377e47a055..0000000000000 --- a/changelog/18192.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Add logic to generate openapi response structures -``` diff --git a/changelog/18198.txt b/changelog/18198.txt deleted file mode 100644 index ed03e74eb1664..0000000000000 --- a/changelog/18198.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Add openapi response definitions to approle/path_role.go -``` diff --git a/changelog/18199.txt b/changelog/18199.txt deleted file mode 100644 index 706a5568ccf1c..0000000000000 --- a/changelog/18199.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Allow templating performance replication cluster- and issuer-specific AIA URLs. -``` diff --git a/changelog/18207.txt b/changelog/18207.txt new file mode 100644 index 0000000000000..00c6d9ac26d74 --- /dev/null +++ b/changelog/18207.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. +``` diff --git a/changelog/18222.txt b/changelog/18222.txt deleted file mode 100644 index a7f822aa73a3e..0000000000000 --- a/changelog/18222.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Respond with written data to `config/auto-tidy`, `config/crl`, and `roles/:role`. 
-``` diff --git a/changelog/18225.txt b/changelog/18225.txt deleted file mode 100644 index 567c3c78da95e..0000000000000 --- a/changelog/18225.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/ldap: allow providing the LDAP password via an env var when authenticating via the CLI -``` diff --git a/changelog/18227.txt b/changelog/18227.txt deleted file mode 100644 index 477915982d03d..0000000000000 --- a/changelog/18227.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**Server UDS Listener**: Adding listener to Vault server to serve http request via unix domain socket -``` \ No newline at end of file diff --git a/changelog/18228.txt b/changelog/18228.txt deleted file mode 100644 index 4f1b6d1046c36..0000000000000 --- a/changelog/18228.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) -``` diff --git a/changelog/18230.txt b/changelog/18230.txt deleted file mode 100644 index 335f9670db2ae..0000000000000 --- a/changelog/18230.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/ldap: allow configuration of alias dereferencing in LDAP search -``` diff --git a/changelog/18243.txt b/changelog/18243.txt deleted file mode 100644 index f187579aa7833..0000000000000 --- a/changelog/18243.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -secrets/transit: Add an optional reference field to batch operation items -which is repeated on batch responses to help more easily correlate inputs with outputs. -``` \ No newline at end of file diff --git a/changelog/18244.txt b/changelog/18244.txt deleted file mode 100644 index b81de038dc0f1..0000000000000 --- a/changelog/18244.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: parallelize backend initialization to improve startup time for large numbers of mounts. 
-``` \ No newline at end of file diff --git a/changelog/18272.txt b/changelog/18272.txt deleted file mode 100644 index 168913be811b8..0000000000000 --- a/changelog/18272.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/transit: Allow configuring whether upsert of keys is allowed. -``` diff --git a/changelog/18279.txt b/changelog/18279.txt deleted file mode 100644 index 9823a1f19b7a4..0000000000000 --- a/changelog/18279.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -core: Added sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] endpoint to unlock an user -with given mount_accessor and alias_identifier if locked -``` \ No newline at end of file diff --git a/changelog/18302.txt b/changelog/18302.txt deleted file mode 100644 index 1f4b69dc863a7..0000000000000 --- a/changelog/18302.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -hcp/status: Expand node-level status information -``` diff --git a/changelog/18315.txt b/changelog/18315.txt deleted file mode 100644 index dc6984749ae9c..0000000000000 --- a/changelog/18315.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -hcp/connectivity: Include HCP organization, project, and resource ID in server startup logs -``` diff --git a/changelog/18351.txt b/changelog/18351.txt deleted file mode 100644 index 07faa06d13560..0000000000000 --- a/changelog/18351.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -hcp/status: Add cluster-level status information -``` diff --git a/changelog/18374.txt b/changelog/18374.txt deleted file mode 100644 index eaa57980d2230..0000000000000 --- a/changelog/18374.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: update DocLink component to use new host url: developer.hashicorp.com -``` diff --git a/changelog/18376.txt b/changelog/18376.txt deleted file mode 100644 index 1edc3df5a1ad1..0000000000000 --- a/changelog/18376.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: 
Add openapi response definitions to pki/config_*.go -``` diff --git a/changelog/18397.txt b/changelog/18397.txt deleted file mode 100644 index aafb9d71e6fd2..0000000000000 --- a/changelog/18397.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when -allowed by role``` diff --git a/changelog/18401.txt b/changelog/18401.txt index 8f1c148fd13a0..441e9a08b0df8 100644 --- a/changelog/18401.txt +++ b/changelog/18401.txt @@ -1,3 +1,3 @@ ```release-note:bug -expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. +expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. ``` diff --git a/changelog/18403.txt b/changelog/18403.txt deleted file mode 100644 index 458f6c92633f6..0000000000000 --- a/changelog/18403.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. -``` diff --git a/changelog/18437.txt b/changelog/18437.txt deleted file mode 100644 index 9ca8a8dc3bb3c..0000000000000 --- a/changelog/18437.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -client/pki: Add a new command verify-sign which checks the relationship between two certificates. 
-``` diff --git a/changelog/18452.txt b/changelog/18452.txt deleted file mode 100644 index 6d4566667ee49..0000000000000 --- a/changelog/18452.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core/activity: de-duplicate namespaces when historical and current month data are mixed -``` diff --git a/changelog/18456.txt b/changelog/18456.txt deleted file mode 100644 index ee297508f2a4b..0000000000000 --- a/changelog/18456.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi response defintions to /sys/audit endpoints -``` \ No newline at end of file diff --git a/changelog/18463.txt b/changelog/18463.txt deleted file mode 100644 index 538f66eb1dc73..0000000000000 --- a/changelog/18463.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli/pki: Add List-Intermediates functionality to pki client. -``` diff --git a/changelog/18465.txt b/changelog/18465.txt deleted file mode 100644 index 928da99bc4fef..0000000000000 --- a/changelog/18465.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi response defintions to /sys/auth endpoints -``` \ No newline at end of file diff --git a/changelog/18467.txt b/changelog/18467.txt deleted file mode 100644 index 55a85a6487a29..0000000000000 --- a/changelog/18467.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. 
-``` \ No newline at end of file diff --git a/changelog/18468.txt b/changelog/18468.txt deleted file mode 100644 index 362bf05018c5b..0000000000000 --- a/changelog/18468.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi response defintions to /sys/capabilities endpoints -``` \ No newline at end of file diff --git a/changelog/18472.txt b/changelog/18472.txt deleted file mode 100644 index e34d53afc2f68..0000000000000 --- a/changelog/18472.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi response defintions to /sys/config and /sys/generate-root endpoints -``` \ No newline at end of file diff --git a/changelog/18482.txt b/changelog/18482.txt deleted file mode 100644 index f51abb675c4a6..0000000000000 --- a/changelog/18482.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Return issuer_id and issuer_name on /issuer/:issuer_ref/json endpoint. -``` diff --git a/changelog/18499.txt b/changelog/18499.txt deleted file mode 100644 index b329ed0db08b8..0000000000000 --- a/changelog/18499.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. -``` \ No newline at end of file diff --git a/changelog/18515.txt b/changelog/18515.txt deleted file mode 100644 index 86eb71b191677..0000000000000 --- a/changelog/18515.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Add openapi response definitions to vault/logical_system_paths.go defined endpoints. 
-``` diff --git a/changelog/18521.txt b/changelog/18521.txt deleted file mode 100644 index 4111aea2c98e4..0000000000000 --- a/changelog/18521.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: wait for wanted message event during OIDC callback instead of using the first message event -``` diff --git a/changelog/18542.txt b/changelog/18542.txt deleted file mode 100644 index ff4674010f4b7..0000000000000 --- a/changelog/18542.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi response definitions to /sys/internal endpoints -``` diff --git a/changelog/18568.txt b/changelog/18568.txt deleted file mode 100644 index a1fbabf2545a0..0000000000000 --- a/changelog/18568.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths -``` diff --git a/changelog/18585.txt b/changelog/18585.txt deleted file mode 100644 index a0832e2d415e0..0000000000000 --- a/changelog/18585.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -hcp/connectivity: Only update SCADA session metadata if status changes -``` diff --git a/changelog/18587.txt b/changelog/18587.txt deleted file mode 100644 index 7471d9a4c0251..0000000000000 --- a/changelog/18587.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] -``` diff --git a/changelog/18589.txt b/changelog/18589.txt deleted file mode 100644 index 2e1ef4878dab1..0000000000000 --- a/changelog/18589.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 -``` \ No newline at end of file diff --git a/changelog/18598.txt b/changelog/18598.txt deleted file mode 100644 index 62d13d0e705f6..0000000000000 --- 
a/changelog/18598.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core/activity: include mount counts when de-duplicating current and historical month data -``` diff --git a/changelog/18604.txt b/changelog/18604.txt deleted file mode 100644 index 7645cbb40394d..0000000000000 --- a/changelog/18604.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: add `detect_deadlocks` config to optionally detect core state deadlocks -``` \ No newline at end of file diff --git a/changelog/18610.txt b/changelog/18610.txt deleted file mode 100644 index bac3add5a1a3c..0000000000000 --- a/changelog/18610.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -auth: Allow naming login MFA methods and using those names instead of IDs in satisfying MFA requirement for requests. -Make passcode arguments consistent across login MFA method types. -``` \ No newline at end of file diff --git a/changelog/18624.txt b/changelog/18624.txt deleted file mode 100644 index 91209bb46d9e2..0000000000000 --- a/changelog/18624.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi response definitions to /sys/rotate endpoints -``` diff --git a/changelog/18625.txt b/changelog/18625.txt deleted file mode 100644 index 526d6b63e6f6b..0000000000000 --- a/changelog/18625.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi response definitions to /sys/seal endpoints -``` \ No newline at end of file diff --git a/changelog/18626.txt b/changelog/18626.txt deleted file mode 100644 index 6bb2ba0f4d89b..0000000000000 --- a/changelog/18626.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi response definitions to /sys/tool endpoints -``` \ No newline at end of file diff --git a/changelog/18627.txt b/changelog/18627.txt deleted file mode 100644 index e2a4dfb5f2e61..0000000000000 --- a/changelog/18627.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi 
response definitions to /sys/wrapping endpoints -``` \ No newline at end of file diff --git a/changelog/18628.txt b/changelog/18628.txt deleted file mode 100644 index 0722856c93b93..0000000000000 --- a/changelog/18628.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: add openapi response definitions to /sys/version-history, /sys/leader, /sys/ha-status, /sys/host-info, /sys/in-flight-req -``` \ No newline at end of file diff --git a/changelog/18632.txt b/changelog/18632.txt deleted file mode 100644 index 535961367a3a8..0000000000000 --- a/changelog/18632.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -database/postgres: Support multiline strings for revocation statements. -``` diff --git a/changelog/18633.txt b/changelog/18633.txt deleted file mode 100644 index 2048c46d914ec..0000000000000 --- a/changelog/18633.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Add openapi response definitions to /sys defined endpoints. -``` \ No newline at end of file diff --git a/changelog/18635.txt b/changelog/18635.txt deleted file mode 100644 index 43f3fdf673653..0000000000000 --- a/changelog/18635.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) -``` diff --git a/changelog/18636.txt b/changelog/18636.txt deleted file mode 100644 index 9f260e2e86fc0..0000000000000 --- a/changelog/18636.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests -``` diff --git a/changelog/18638.txt b/changelog/18638.txt deleted file mode 100644 index 727c85a669967..0000000000000 --- a/changelog/18638.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: allows some parts of config to be reloaded without requiring a restart. 
-``` \ No newline at end of file diff --git a/changelog/18645.txt b/changelog/18645.txt deleted file mode 100644 index 0122111bae42a..0000000000000 --- a/changelog/18645.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. -``` diff --git a/changelog/18663.txt b/changelog/18663.txt deleted file mode 100644 index 941b2715ef75a..0000000000000 --- a/changelog/18663.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` -``` diff --git a/changelog/18673.txt b/changelog/18673.txt deleted file mode 100644 index 73a2a8f43925f..0000000000000 --- a/changelog/18673.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods. -``` \ No newline at end of file diff --git a/changelog/18675.txt b/changelog/18675.txt deleted file mode 100644 index 90a8ed64d21cb..0000000000000 --- a/changelog/18675.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -core: Added sys/locked-users endpoint to list locked users. Changed api endpoint from -sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. -``` \ No newline at end of file diff --git a/changelog/18682.txt b/changelog/18682.txt deleted file mode 100644 index 9042109033511..0000000000000 --- a/changelog/18682.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -core: Add experiments system and `events.alpha1` experiment. 
-``` - diff --git a/changelog/18684.txt b/changelog/18684.txt deleted file mode 100644 index 803c7cc571f94..0000000000000 --- a/changelog/18684.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: Add note in logs when starting Vault Agent indicating if the version differs to the Vault Server. -``` diff --git a/changelog/18704.txt b/changelog/18704.txt deleted file mode 100644 index bc76db9ba9230..0000000000000 --- a/changelog/18704.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -storage/raft: Fix race with follower heartbeat tracker during teardown. -``` \ No newline at end of file diff --git a/changelog/18708.txt b/changelog/18708.txt deleted file mode 100644 index 1db2ba6239f0f..0000000000000 --- a/changelog/18708.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext -``` diff --git a/changelog/18718.txt b/changelog/18718.txt deleted file mode 100644 index a5b9b133421c9..0000000000000 --- a/changelog/18718.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. -``` \ No newline at end of file diff --git a/changelog/18729.txt b/changelog/18729.txt deleted file mode 100644 index 975d0274bc6f9..0000000000000 --- a/changelog/18729.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -sdk/backend: prevent panic when computing the zero value for a `TypeInt64` schema field. -``` \ No newline at end of file diff --git a/changelog/18740.txt b/changelog/18740.txt deleted file mode 100644 index f493995d48a69..0000000000000 --- a/changelog/18740.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent. 
-``` diff --git a/changelog/18752.txt b/changelog/18752.txt deleted file mode 100644 index 95346e0431d12..0000000000000 --- a/changelog/18752.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -**Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility. -``` \ No newline at end of file diff --git a/changelog/18766.txt b/changelog/18766.txt deleted file mode 100644 index 50743b3916f11..0000000000000 --- a/changelog/18766.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. -``` diff --git a/changelog/18772.txt b/changelog/18772.txt deleted file mode 100644 index 55c0696de1ba5..0000000000000 --- a/changelog/18772.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go -``` diff --git a/changelog/18811.txt b/changelog/18811.txt deleted file mode 100644 index 34a155dda5a4d..0000000000000 --- a/changelog/18811.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth: Provide an IP address of the requests from Vault to a Duo challenge after successful authentication. -``` diff --git a/changelog/18817.txt b/changelog/18817.txt deleted file mode 100644 index 17c93aab75c80..0000000000000 --- a/changelog/18817.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. 
-``` \ No newline at end of file diff --git a/changelog/18842.txt b/changelog/18842.txt deleted file mode 100644 index 9a69ff66f1269..0000000000000 --- a/changelog/18842.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**New PKI UI**: Add beta support for new and improved PKI UI -``` \ No newline at end of file diff --git a/changelog/18859.txt b/changelog/18859.txt deleted file mode 100644 index 0ee2c361e2919..0000000000000 --- a/changelog/18859.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided -``` diff --git a/changelog/18863.txt b/changelog/18863.txt deleted file mode 100644 index c1f2800c28904..0000000000000 --- a/changelog/18863.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: JWT auto-auth has a new config option, `remove_jwt_follows_symlinks` (default: false), that, if set to true will now remove the JWT, instead of the symlink to the JWT, if a symlink to a JWT has been provided in the `path` option, and the `remove_jwt_after_reading` config option is set to true (default). -``` \ No newline at end of file diff --git a/changelog/18870.txt b/changelog/18870.txt deleted file mode 100644 index 1b694895fec61..0000000000000 --- a/changelog/18870.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core:provide more descriptive error message when calling enterprise feature paths in open-source -``` \ No newline at end of file diff --git a/changelog/18874.txt b/changelog/18874.txt deleted file mode 100644 index 7483c43f90601..0000000000000 --- a/changelog/18874.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:security -secrets/ssh: removal of the deprecated dynamic keys mode. **When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. 
-``` diff --git a/changelog/18885.txt b/changelog/18885.txt deleted file mode 100644 index 99878c89c1034..0000000000000 --- a/changelog/18885.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:enhancement -auth/cert: Load config, crls from InitializeFunc to allow parallel processing. -``` diff --git a/changelog/18887.txt b/changelog/18887.txt deleted file mode 100644 index 55e8600878df5..0000000000000 --- a/changelog/18887.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli: Add transit import key helper commands for BYOK to Transit/Transform. -``` \ No newline at end of file diff --git a/changelog/18890.txt b/changelog/18890.txt deleted file mode 100644 index 056e585996980..0000000000000 --- a/changelog/18890.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:bug -core: removes strings.ToLower for alias name from pathLoginAliasLookahead function in userpass. This fixes -the storage entry for locked users by having the correct alias name in path. -`` \ No newline at end of file diff --git a/changelog/18892.txt b/changelog/18892.txt deleted file mode 100644 index 65b6ebf246a7b..0000000000000 --- a/changelog/18892.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` -``` diff --git a/changelog/18916.txt b/changelog/18916.txt deleted file mode 100644 index eb2792b31e407..0000000000000 --- a/changelog/18916.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. 
-``` diff --git a/changelog/18934.txt b/changelog/18934.txt deleted file mode 100644 index e84f6667ccb2b..0000000000000 --- a/changelog/18934.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Change gen_openapi.sh to generate schema with generic mount paths -``` diff --git a/changelog/18935.txt b/changelog/18935.txt deleted file mode 100644 index c55cda115cd8f..0000000000000 --- a/changelog/18935.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Add default values to thing_mount_path parameters -``` diff --git a/changelog/18939.txt b/changelog/18939.txt deleted file mode 100644 index aa7f8e7c6658f..0000000000000 --- a/changelog/18939.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/ssh: Allow removing SSH host keys from the dynamic keys feature. -``` diff --git a/changelog/18962.txt b/changelog/18962.txt deleted file mode 100644 index 322c34780a2e0..0000000000000 --- a/changelog/18962.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -api: Remove dependency on sdk module. -``` diff --git a/changelog/18984.txt b/changelog/18984.txt deleted file mode 100644 index 4652bf299bfef..0000000000000 --- a/changelog/18984.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/pki: consistently use UTC for CA's notAfter exceeded error message -``` diff --git a/changelog/19002.txt b/changelog/19002.txt deleted file mode 100644 index d1a1ff5371ab4..0000000000000 --- a/changelog/19002.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: Added `reload` option to cert auth configuration in case of external renewals of local x509 key-pairs. 
-``` \ No newline at end of file diff --git a/changelog/19005.txt b/changelog/19005.txt deleted file mode 100644 index 27e251e193dd5..0000000000000 --- a/changelog/19005.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note:change -auth/alicloud: require the `role` field on login -``` - -```release-note:bug -auth/alicloud: fix regression in vault login command that caused login to fail -``` diff --git a/changelog/19018.txt b/changelog/19018.txt deleted file mode 100644 index bd79dbd15911d..0000000000000 --- a/changelog/19018.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note:feature -**GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. -``` - -```release-note:bug -secrets/gcp: fix issue where IAM bindings were not preserved during policy update -``` diff --git a/changelog/19043.txt b/changelog/19043.txt deleted file mode 100644 index 20a1a77bb7890..0000000000000 --- a/changelog/19043.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: added ability to validate response structures against openapi schema for test clusters -``` \ No newline at end of file diff --git a/changelog/19044.txt b/changelog/19044.txt deleted file mode 100644 index 7926bb66c9a71..0000000000000 --- a/changelog/19044.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -database/redis-elasticache: changed config argument names for disambiguation -``` \ No newline at end of file diff --git a/changelog/19056.txt b/changelog/19056.txt deleted file mode 100644 index b5b1ae3520307..0000000000000 --- a/changelog/19056.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/kv: make upgrade synchronous when no keys to upgrade -``` \ No newline at end of file diff --git a/changelog/19061.txt b/changelog/19061.txt deleted file mode 100644 index ddf794358def2..0000000000000 --- 
a/changelog/19061.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/ad: Fix bug where updates to config would fail if password isn't provided -``` diff --git a/changelog/19063.txt b/changelog/19063.txt deleted file mode 100644 index df361111bd9f9..0000000000000 --- a/changelog/19063.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 -``` diff --git a/changelog/19068.txt b/changelog/19068.txt deleted file mode 100644 index 6edb29fe11039..0000000000000 --- a/changelog/19068.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -sdk: Remove version package, make useragent.String versionless. -``` diff --git a/changelog/19076.txt b/changelog/19076.txt deleted file mode 100644 index c206f44e82c3b..0000000000000 --- a/changelog/19076.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:improvement -auth/oidc: Adds ability to set Google Workspace domain for groups search -``` -```release-note:improvement -auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts -``` \ No newline at end of file diff --git a/changelog/19077.txt b/changelog/19077.txt deleted file mode 100644 index 604cea57b5c6d..0000000000000 --- a/changelog/19077.txt +++ /dev/null @@ -1,11 +0,0 @@ -```release-note:feature -**Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine -``` - -```release-note:feature -**Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault -``` - -```release-note:feature -**VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication -``` \ No newline at end of file diff --git a/changelog/19084.txt b/changelog/19084.txt deleted file mode 100644 index 97896d3d27bc3..0000000000000 --- a/changelog/19084.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/kubernetes: add /check endpoint to determine if environment variables are set 
-``` diff --git a/changelog/19094.txt b/changelog/19094.txt deleted file mode 100644 index d3d872d91c965..0000000000000 --- a/changelog/19094.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) -``` diff --git a/changelog/19096.txt b/changelog/19096.txt deleted file mode 100644 index 2cb0bbf04a685..0000000000000 --- a/changelog/19096.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/azure: Adds ability to persist an application for the lifetime of a role. -``` \ No newline at end of file diff --git a/changelog/19098.txt b/changelog/19098.txt deleted file mode 100644 index df0f9c11ca3bd..0000000000000 --- a/changelog/19098.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/cf: Remove incorrect usage of CreateOperation from path_config -``` diff --git a/changelog/19100.txt b/changelog/19100.txt deleted file mode 100644 index a2f1b72e18df1..0000000000000 --- a/changelog/19100.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 -``` - diff --git a/changelog/19111.txt b/changelog/19111.txt deleted file mode 100644 index 35b7803d69740..0000000000000 --- a/changelog/19111.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys -``` diff --git a/changelog/19116.txt b/changelog/19116.txt deleted file mode 100644 index 5dfcd9ecfadab..0000000000000 --- a/changelog/19116.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Allows license-banners to be dismissed. Saves preferences in localStorage. 
-``` \ No newline at end of file diff --git a/changelog/19139.txt b/changelog/19139.txt deleted file mode 100644 index 75e9a7847e17f..0000000000000 --- a/changelog/19139.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: fixes bug in kmip role form that caused `operation_all` to persist after deselecting all operation checkboxes -``` diff --git a/changelog/19145.txt b/changelog/19145.txt deleted file mode 100644 index 9cca8e85d6344..0000000000000 --- a/changelog/19145.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -secrets/kv: Emit events on write if events system enabled -``` - diff --git a/changelog/19160.txt b/changelog/19160.txt deleted file mode 100644 index 66a3baa157583..0000000000000 --- a/changelog/19160.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -api: Addressed a couple of issues that arose as edge cases for the -output-policy flag. Specifically around properly handling list commands, distinguishing kv V1/V2, and correctly recognizing protected paths. -``` \ No newline at end of file diff --git a/changelog/19170.txt b/changelog/19170.txt deleted file mode 100644 index 9a421dd183a2d..0000000000000 --- a/changelog/19170.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -website/docs: fix database static-user sample payload -``` diff --git a/changelog/19187.txt b/changelog/19187.txt deleted file mode 100644 index c04234a1bb9ba..0000000000000 --- a/changelog/19187.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -website/docs: Add rotate root documentation for azure secrets engine -``` diff --git a/changelog/19194.txt b/changelog/19194.txt deleted file mode 100644 index b2a5ff383f126..0000000000000 --- a/changelog/19194.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:feature -**Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. 
Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. -``` - diff --git a/changelog/19196.txt b/changelog/19196.txt deleted file mode 100644 index aab2638ceac79..0000000000000 --- a/changelog/19196.txt +++ /dev/null @@ -1,5 +0,0 @@ -```release-note:feature -**PKI Cross-Cluster Revocations**: Revocation information can now be -synchronized across primary and performance replica clusters offering -a unified CRL/OCSP view of revocations across cluster boundaries. -``` diff --git a/changelog/19215.txt b/changelog/19215.txt deleted file mode 100644 index 33fea94666bd1..0000000000000 --- a/changelog/19215.txt +++ /dev/null @@ -1,5 +0,0 @@ -```release-note:feature -**Secrets/Auth Plugin Multiplexing**: The plugin will be multiplexed when run -as an external plugin by vault versions that support secrets/auth plugin -multiplexing (> 1.12) -``` diff --git a/changelog/19216.txt b/changelog/19216.txt deleted file mode 100644 index e03e866e08b45..0000000000000 --- a/changelog/19216.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: adds allowed_response_headers as param for secret engine mount config -``` diff --git a/changelog/19247.txt b/changelog/19247.txt deleted file mode 100644 index f51e8479c97f1..0000000000000 --- a/changelog/19247.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/oidc: Adds support for group membership parsing when using IBM ISAM as an OIDC provider. 
-``` diff --git a/changelog/19252.txt b/changelog/19252.txt deleted file mode 100644 index 99121351d98ca..0000000000000 --- a/changelog/19252.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Consistently stop Vault server on exit in gen_openapi.sh -``` diff --git a/changelog/19260.txt b/changelog/19260.txt deleted file mode 100644 index 77138a38607cb..0000000000000 --- a/changelog/19260.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**agent/auto-auth:**: Add OCI (Oracle Cloud Infrastructure) auto-auth method -``` diff --git a/changelog/19265.txt b/changelog/19265.txt deleted file mode 100644 index 23d957e2d5940..0000000000000 --- a/changelog/19265.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -cli/pki: Decode integer values properly in health-check configuration file -``` diff --git a/changelog/19269.txt b/changelog/19269.txt deleted file mode 100644 index 57ff2072a18c3..0000000000000 --- a/changelog/19269.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file -``` diff --git a/changelog/19274.txt b/changelog/19274.txt deleted file mode 100644 index a7f5d8c292934..0000000000000 --- a/changelog/19274.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -cli/pki: Fix path for role health-check warning messages -``` diff --git a/changelog/19276.txt b/changelog/19276.txt deleted file mode 100644 index 373199478f921..0000000000000 --- a/changelog/19276.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -cli/pki: Properly report permission issues within health-check mount tune checks -``` diff --git a/changelog/19290.txt b/changelog/19290.txt deleted file mode 100644 index 1a4511590c695..0000000000000 --- a/changelog/19290.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. 
-``` diff --git a/changelog/19296.txt b/changelog/19296.txt deleted file mode 100644 index 1ef62a0cde2e6..0000000000000 --- a/changelog/19296.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**Sidebar Navigation in UI**: A new sidebar navigation panel has been added in the UI to replace the top navigation bar. -``` \ No newline at end of file diff --git a/changelog/19319.txt b/changelog/19319.txt deleted file mode 100644 index 4702344afb081..0000000000000 --- a/changelog/19319.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -openapi: Improve operationId/request/response naming strategy -``` diff --git a/changelog/19334.txt b/changelog/19334.txt deleted file mode 100644 index 7df68268aabe8..0000000000000 --- a/changelog/19334.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:deprecation -secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated. -``` \ No newline at end of file diff --git a/changelog/19365.txt b/changelog/19365.txt deleted file mode 100644 index 774c750f49513..0000000000000 --- a/changelog/19365.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note: enhancement -auth/aws: Support request cancellation with AWS requests -``` - -```release-note: enhancement -secrets/aws: Support request cancellation with AWS requests -``` diff --git a/changelog/19373.txt b/changelog/19373.txt deleted file mode 100644 index 87751805e7d80..0000000000000 --- a/changelog/19373.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -cli/transit: Fix import, import-version command invocation -``` diff --git a/changelog/19378.txt b/changelog/19378.txt deleted file mode 100644 index 40a1e82fcb642..0000000000000 --- a/changelog/19378.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -cli/kv: add -mount flag to kv list -``` diff --git a/changelog/19410.txt b/changelog/19410.txt new file mode 100644 index 0000000000000..1a4fd85c8fb16 --- /dev/null +++ b/changelog/19410.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes reliance on 
secure context (https) by removing methods using the Crypto interface +``` diff --git a/changelog/19416.txt b/changelog/19416.txt deleted file mode 100644 index f2a7d3275b64a..0000000000000 --- a/changelog/19416.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -auth/token: Fix cubbyhole and revocation for legacy service tokens -``` diff --git a/changelog/19428.txt b/changelog/19428.txt deleted file mode 100644 index c1ae6d54bbcba..0000000000000 --- a/changelog/19428.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: Fixes crypto.randomUUID error in unsecure contexts from third party ember-data library -``` \ No newline at end of file diff --git a/changelog/19448.txt b/changelog/19448.txt deleted file mode 100644 index 8c75b79f140c5..0000000000000 --- a/changelog/19448.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: fixes SSH engine config deletion -``` diff --git a/changelog/19468.txt b/changelog/19468.txt deleted file mode 100644 index 5afce90eb65f3..0000000000000 --- a/changelog/19468.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -plugin/reload: Fix a possible data race with rollback manager and plugin reload -``` diff --git a/changelog/19472.txt b/changelog/19472.txt deleted file mode 100644 index db9ec7276550e..0000000000000 --- a/changelog/19472.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -autopilot: Update version to v0.2.0 to add better support for respecting min quorum -``` diff --git a/changelog/19483.txt b/changelog/19483.txt deleted file mode 100644 index c7ba6f66d97d7..0000000000000 --- a/changelog/19483.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener. 
-``` diff --git a/changelog/19495.txt b/changelog/19495.txt deleted file mode 100644 index dac2ca00dfb87..0000000000000 --- a/changelog/19495.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -shamir: change mul and div implementations to be constant-time -``` \ No newline at end of file diff --git a/changelog/19519.txt b/changelog/19519.txt deleted file mode 100644 index 6756f62b2d47a..0000000000000 --- a/changelog/19519.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/transit: Allow importing RSA-PSS OID (1.2.840.113549.1.1.10) private keys via BYOK. -``` diff --git a/changelog/19520.txt b/changelog/19520.txt deleted file mode 100644 index 726be2c13a60b..0000000000000 --- a/changelog/19520.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -http: Support responding to HEAD operation from plugins -``` diff --git a/changelog/19541.txt b/changelog/19541.txt deleted file mode 100644 index 9bdecc35832d3..0000000000000 --- a/changelog/19541.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been inputted -``` diff --git a/changelog/19545.txt b/changelog/19545.txt deleted file mode 100644 index 615742cd32655..0000000000000 --- a/changelog/19545.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -database/elasticsearch: Update error messages resulting from Elasticsearch API errors -``` \ No newline at end of file diff --git a/changelog/19593.txt b/changelog/19593.txt deleted file mode 100644 index 8f170578ec07f..0000000000000 --- a/changelog/19593.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -events: Suppress log warnings triggered when events are sent but the events system is not enabled. 
-``` - diff --git a/changelog/19616.txt b/changelog/19616.txt deleted file mode 100644 index 3afcc608d19a9..0000000000000 --- a/changelog/19616.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/postgresql: Add configuration to scram-sha-256 encrypt passwords on Vault before sending them to PostgreSQL -``` \ No newline at end of file diff --git a/changelog/19624.txt b/changelog/19624.txt deleted file mode 100644 index 7bc2df63ea858..0000000000000 --- a/changelog/19624.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug -``` diff --git a/changelog/19640.txt b/changelog/19640.txt deleted file mode 100644 index 8dcf59bf87fbd..0000000000000 --- a/changelog/19640.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug - secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. -``` diff --git a/changelog/19641.txt b/changelog/19641.txt new file mode 100644 index 0000000000000..01c1bcf3d3f9a --- /dev/null +++ b/changelog/19641.txt @@ -0,0 +1,3 @@ +```release-note:bug + secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. +``` \ No newline at end of file diff --git a/changelog/19776.txt b/changelog/19776.txt deleted file mode 100644 index 786cfd321673f..0000000000000 --- a/changelog/19776.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: Vault Agent now reports its name and version as part of the User-Agent header in all requests issued. 
-``` diff --git a/changelog/19791.txt b/changelog/19791.txt deleted file mode 100644 index 26722cde31334..0000000000000 --- a/changelog/19791.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: add allowed_managed_keys field to secret engine mount options -``` diff --git a/changelog/19798.txt b/changelog/19798.txt deleted file mode 100644 index 4bae8b637897b..0000000000000 --- a/changelog/19798.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/terraform: upgrades dependencies -``` \ No newline at end of file diff --git a/changelog/19814.txt b/changelog/19814.txt deleted file mode 100644 index 687527efca8a8..0000000000000 --- a/changelog/19814.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -audit: add plugin metadata, including plugin name, type, version, sha256, and whether plugin is external, to audit logging -``` \ No newline at end of file diff --git a/changelog/19829.txt b/changelog/19829.txt deleted file mode 100644 index e8472b2717edb..0000000000000 --- a/changelog/19829.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/ad: upgrades dependencies -``` \ No newline at end of file diff --git a/changelog/19846.txt b/changelog/19846.txt deleted file mode 100644 index 269b11797b9e8..0000000000000 --- a/changelog/19846.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/alicloud: upgrades dependencies -``` diff --git a/changelog/19861.txt b/changelog/19861.txt deleted file mode 100644 index ee5bc703e9cbe..0000000000000 --- a/changelog/19861.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/mongodbatlas: upgrades dependencies -``` \ No newline at end of file diff --git a/changelog/19862.txt b/changelog/19862.txt deleted file mode 100644 index c1ce6d8bb7105..0000000000000 --- a/changelog/19862.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -build: Prefer GOBIN when set over GOPATH/bin when building the binary -``` diff --git 
a/changelog/19878.txt b/changelog/19878.txt deleted file mode 100644 index 4135434b79238..0000000000000 --- a/changelog/19878.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Remove the Bulma CSS framework. -``` \ No newline at end of file diff --git a/changelog/19891.txt b/changelog/19891.txt deleted file mode 100644 index b030151e858b2..0000000000000 --- a/changelog/19891.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core (enterprise): add configuration for license reporting -``` \ No newline at end of file diff --git a/changelog/19901.txt b/changelog/19901.txt deleted file mode 100644 index 8e0bbbddb5ec6..0000000000000 --- a/changelog/19901.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Updates UI javascript dependencies -``` \ No newline at end of file diff --git a/changelog/19913.txt b/changelog/19913.txt deleted file mode 100644 index eccdec6533ad2..0000000000000 --- a/changelog/19913.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Adds whitespace warning to secrets engine and auth method path inputs -``` \ No newline at end of file diff --git a/changelog/19954.txt b/changelog/19954.txt deleted file mode 100644 index e0ff45f87d22c..0000000000000 --- a/changelog/19954.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -database/mongodb: upgrade mongo driver to 1.11 -``` diff --git a/changelog/19993.txt b/changelog/19993.txt deleted file mode 100644 index 90650863ab887..0000000000000 --- a/changelog/19993.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/openldap: upgrades dependencies -``` \ No newline at end of file diff --git a/changelog/20034.txt b/changelog/20034.txt deleted file mode 100644 index c1050795bdc4d..0000000000000 --- a/changelog/20034.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note: bug -secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. 
-``` diff --git a/changelog/20057.txt b/changelog/20057.txt deleted file mode 100644 index 585a07d91b3a2..0000000000000 --- a/changelog/20057.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note: bug -secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unattended forwarding. -``` diff --git a/changelog/20058.txt b/changelog/20058.txt deleted file mode 100644 index e43a1f4adf934..0000000000000 --- a/changelog/20058.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures. -``` diff --git a/changelog/20070.txt b/changelog/20070.txt deleted file mode 100644 index 34e6e5540d695..0000000000000 --- a/changelog/20070.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: fixes remaining doc links to include /vault in path -``` \ No newline at end of file diff --git a/changelog/20073.txt b/changelog/20073.txt deleted file mode 100644 index 10c21a58ba522..0000000000000 --- a/changelog/20073.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core/activity: refactor the activity log's generation of precomputed queries -``` \ No newline at end of file diff --git a/changelog/20078.txt b/changelog/20078.txt deleted file mode 100644 index 8749354b315d7..0000000000000 --- a/changelog/20078.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core/activity: error when attempting to update retention configuration below the minimum -``` \ No newline at end of file diff --git a/changelog/20086.txt b/changelog/20086.txt deleted file mode 100644 index 9511c97b66e31..0000000000000 --- a/changelog/20086.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -api: `/sys/internal/counters/config` endpoint now contains read-only -`reporting_enabled` and `billing_start_timestamp` fields. 
-``` diff --git a/changelog/20109.txt b/changelog/20109.txt deleted file mode 100644 index 8c7cb3b32de14..0000000000000 --- a/changelog/20109.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -sys/wrapping: Add example how to unwrap without authentication in Vault -``` diff --git a/changelog/20125.txt b/changelog/20125.txt deleted file mode 100644 index 07dd8201dba89..0000000000000 --- a/changelog/20125.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: updates clients configuration edit form state based on census reporting configuration -``` \ No newline at end of file diff --git a/changelog/20150.txt b/changelog/20150.txt deleted file mode 100644 index 0ea8259f9e663..0000000000000 --- a/changelog/20150.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -api: `/sys/internal/counters/config` endpoint now contains read-only -`minimum_retention_months`. -``` diff --git a/changelog/20154.txt b/changelog/20154.txt deleted file mode 100644 index 7bda3624fba18..0000000000000 --- a/changelog/20154.txt +++ /dev/null @@ -1,2 +0,0 @@ -```release-note:bug -auth/cert: Include OCSP parameters in read CA certificate role response. diff --git a/changelog/20163.txt b/changelog/20163.txt deleted file mode 100644 index 0b845fbae0db7..0000000000000 --- a/changelog/20163.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: adds warning for commas in stringArray inputs and updates tooltip help text to remove references to comma separation -``` diff --git a/changelog/20181.txt b/changelog/20181.txt deleted file mode 100644 index 121c869e4aaf8..0000000000000 --- a/changelog/20181.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:bug -sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. -auth/cert: Fix OCSP validation against Vault's PKI engine. 
-``` diff --git a/changelog/20216.txt b/changelog/20216.txt deleted file mode 100644 index 59ee78c889e33..0000000000000 --- a/changelog/20216.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. -``` diff --git a/changelog/20224.txt b/changelog/20224.txt deleted file mode 100644 index 7ec5bf612177d..0000000000000 --- a/changelog/20224.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -command/server: New -dev-cluster-json writes a file describing the dev cluster in -dev and -dev-three-node modes, plus -dev-three-node now enables unauthenticated metrics and pprof requests. -``` diff --git a/changelog/20234.txt b/changelog/20234.txt deleted file mode 100644 index 1f20bdc5a9209..0000000000000 --- a/changelog/20234.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/cert: Better return OCSP validation errors during login to the caller. -``` diff --git a/changelog/20247.txt b/changelog/20247.txt deleted file mode 100644 index 91f2f0d23fcdf..0000000000000 --- a/changelog/20247.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -sdk: Add new docker-based cluster testing framework to the sdk. -``` diff --git a/changelog/20253.txt b/changelog/20253.txt deleted file mode 100644 index 19edae1bc4f2e..0000000000000 --- a/changelog/20253.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Add warning when issuer lacks KeyUsage during CRL rebuilds; expose in logs and on rotation. 
-``` diff --git a/changelog/20261.txt b/changelog/20261.txt deleted file mode 100644 index 5f4eb977cce1f..0000000000000 --- a/changelog/20261.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -* physical/etcd: Upgrade etcd3 client to v3.5.7 -``` \ No newline at end of file diff --git a/changelog/20265.txt b/changelog/20265.txt deleted file mode 100644 index 8e27875f627f3..0000000000000 --- a/changelog/20265.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -* api: Add Config.TLSConfig method to fetch the TLS configuration from a client config. -``` \ No newline at end of file diff --git a/changelog/20276.txt b/changelog/20276.txt deleted file mode 100644 index 71f288ab9a0d6..0000000000000 --- a/changelog/20276.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Include CA serial number, key UUID on issuers list endpoint. -``` diff --git a/changelog/20285.txt b/changelog/20285.txt deleted file mode 100644 index 2bc2241dfe0ba..0000000000000 --- a/changelog/20285.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -openapi: Small fixes for OpenAPI display attributes. Changed "log-in" to "login" -``` diff --git a/changelog/20294.txt b/changelog/20294.txt deleted file mode 100644 index 92f7c291892be..0000000000000 --- a/changelog/20294.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -Add debug symbols back to builds to fix Dynatrace support -``` diff --git a/changelog/20354.txt b/changelog/20354.txt deleted file mode 100644 index abdacb7dac457..0000000000000 --- a/changelog/20354.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/pki: Include per-issuer enable_aia_url_templating in issuer read endpoint. -``` diff --git a/changelog/20368.txt b/changelog/20368.txt deleted file mode 100644 index bca5957d1d290..0000000000000 --- a/changelog/20368.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core/identity: Allow updates of only the custom-metadata for entity alias. 
-``` \ No newline at end of file diff --git a/changelog/20418.txt b/changelog/20418.txt deleted file mode 100644 index 596b7e461d23c..0000000000000 --- a/changelog/20418.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -command/server: fixes panic in Vault server command when running in recovery mode -``` \ No newline at end of file diff --git a/changelog/20425.txt b/changelog/20425.txt deleted file mode 100644 index 20869fc19f708..0000000000000 --- a/changelog/20425.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**MongoDB Atlas Database Secrets**: Adds support for client certificate credentials -``` diff --git a/changelog/20430.txt b/changelog/20430.txt deleted file mode 100644 index 5ac95f104cdb5..0000000000000 --- a/changelog/20430.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: Fix secret render when path includes %. Resolves #11616. -``` diff --git a/changelog/20431.txt b/changelog/20431.txt deleted file mode 100644 index a0083d879ecd0..0000000000000 --- a/changelog/20431.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Add download button for each secret value in KV v2 -``` diff --git a/changelog/20441.txt b/changelog/20441.txt deleted file mode 100644 index 628784883f8c4..0000000000000 --- a/changelog/20441.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Allow determining existing issuers and keys on import. -``` diff --git a/changelog/20442.txt b/changelog/20442.txt deleted file mode 100644 index 09636b69b0602..0000000000000 --- a/changelog/20442.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Add missing fields to tidy-status, include new last_auto_tidy_finished field. 
-``` diff --git a/changelog/20464.txt b/changelog/20464.txt deleted file mode 100644 index 6b58153fccf61..0000000000000 --- a/changelog/20464.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli: Add walkSecretsTree helper function, which recursively walks secrets rooted at the given path -``` diff --git a/changelog/20481.txt b/changelog/20481.txt deleted file mode 100644 index c6f27116311b5..0000000000000 --- a/changelog/20481.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Add filtering by engine type and engine name to the Secret Engine list view. -``` diff --git a/changelog/20488.txt b/changelog/20488.txt deleted file mode 100644 index 5ea0f78b39285..0000000000000 --- a/changelog/20488.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli: Improve addPrefixToKVPath helper -``` diff --git a/changelog/20519.txt b/changelog/20519.txt deleted file mode 100644 index 92f7c291892be..0000000000000 --- a/changelog/20519.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -Add debug symbols back to builds to fix Dynatrace support -``` diff --git a/changelog/20530.txt b/changelog/20530.txt deleted file mode 100644 index 6f6d04bf17e82..0000000000000 --- a/changelog/20530.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**Environment Variables through Vault Agent**: Introducing a new process-supervisor mode for Vault Agent which allows injecting secrets as environment variables into a child process using a new `env_template` configuration stanza. The process-supervisor configuration can be generated with a new `vault agent generate-config` helper tool. -``` diff --git a/changelog/20536.txt b/changelog/20536.txt deleted file mode 100644 index 62aa93605c380..0000000000000 --- a/changelog/20536.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**AWS Static Roles**: The AWS Secrets Engine can manage static roles configured by users. 
-``` diff --git a/changelog/20548.txt b/changelog/20548.txt deleted file mode 100644 index fed5d2b4506e2..0000000000000 --- a/changelog/20548.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**Vault Proxy**: Introduced Vault Proxy, a new subcommand of the Vault binary that can be invoked using `vault proxy -config=config.hcl`. It currently has the same feature set as Vault Agent's API proxy, but the two may diverge in the future. We plan to deprecate the API proxy functionality of Vault Agent in a future release. -``` diff --git a/changelog/20559.txt b/changelog/20559.txt deleted file mode 100644 index 2ff6422db0dbe..0000000000000 --- a/changelog/20559.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core, secrets/pki, audit: Update dependency go-jose to v3 due to v2 deprecation. -``` diff --git a/changelog/20569.txt b/changelog/20569.txt deleted file mode 100644 index e10a4643ea7fd..0000000000000 --- a/changelog/20569.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: Add logic to validate env_template entries in configuration -``` diff --git a/changelog/20590.txt b/changelog/20590.txt deleted file mode 100644 index c1c7c9e2b5265..0000000000000 --- a/changelog/20590.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Update Web CLI with examples and a new `kv-get` command for reading kv v2 data and metadata -``` diff --git a/changelog/20595.txt b/changelog/20595.txt deleted file mode 100644 index 982f41498f13b..0000000000000 --- a/changelog/20595.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Add possibility to decode a generated encoded root token via the rest API -``` diff --git a/changelog/20603.txt b/changelog/20603.txt deleted file mode 100644 index c3e7e2bbe7db0..0000000000000 --- a/changelog/20603.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: fixes issue creating mfa login enforcement from method enforcements tab -``` \ No newline at end of file diff --git 
a/changelog/20626.txt b/changelog/20626.txt deleted file mode 100644 index 2a13cee1735da..0000000000000 --- a/changelog/20626.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -activitylog: EntityRecord protobufs now contain a ClientType field for -distinguishing client sources. -``` diff --git a/changelog/20628.txt b/changelog/20628.txt deleted file mode 100644 index 978814601a30e..0000000000000 --- a/changelog/20628.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: initial implementation of a process runner for injecting secrets via environment variables via vault agent -``` \ No newline at end of file diff --git a/changelog/20629.txt b/changelog/20629.txt deleted file mode 100644 index f5692f7691e08..0000000000000 --- a/changelog/20629.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -command/server (enterprise): -dev-three-node now creates perf standbys instead of regular standbys. -``` \ No newline at end of file diff --git a/changelog/20643.txt b/changelog/20643.txt deleted file mode 100644 index 340ec5b547ff4..0000000000000 --- a/changelog/20643.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: report intermediate error messages during request forwarding -``` diff --git a/changelog/20652.txt b/changelog/20652.txt deleted file mode 100644 index c41e750c04721..0000000000000 --- a/changelog/20652.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/transit: Respond to writes with updated key policy, cache configuration. -``` diff --git a/changelog/20654.txt b/changelog/20654.txt deleted file mode 100644 index 91e567477b5b9..0000000000000 --- a/changelog/20654.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/pki: Warning when issuing leafs from CSRs with basic constraints. In the future, issuance of non-CA leaf certs from CSRs with asserted IsCA Basic Constraints will be prohibited. 
-``` diff --git a/changelog/20664.txt b/changelog/20664.txt deleted file mode 100644 index 6f2b4abe61ae3..0000000000000 --- a/changelog/20664.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. -``` diff --git a/changelog/20668.txt b/changelog/20668.txt deleted file mode 100644 index f3f840c47d1db..0000000000000 --- a/changelog/20668.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. -``` diff --git a/changelog/20680.txt b/changelog/20680.txt deleted file mode 100644 index ff80ac4660924..0000000000000 --- a/changelog/20680.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:improvement -core (enterprise): support reloading configuration for automated reporting via SIGHUP -``` -```release-note:improvement -core (enterprise): license updates trigger a reload of reporting and the activity log -``` \ No newline at end of file diff --git a/changelog/20694.txt b/changelog/20694.txt deleted file mode 100644 index 07f790a666ddf..0000000000000 --- a/changelog/20694.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -api: GET ... /sys/internal/counters/activity?current_billing_period=true now -results in a response which contains the full billing period -``` diff --git a/changelog/20697.txt b/changelog/20697.txt deleted file mode 100644 index be80443714dae..0000000000000 --- a/changelog/20697.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: update detail views that render ttl durations to display full unit instead of letter (i.e. 
'days' instead of 'd') -``` diff --git a/changelog/20701.txt b/changelog/20701.txt deleted file mode 100644 index 24942d5d066cc..0000000000000 --- a/changelog/20701.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-notes:bug -secrets/pki: Fix race during runUnifiedTransfer when deciding to skip re-running a test within a short window. -``` diff --git a/changelog/20725.txt b/changelog/20725.txt deleted file mode 100644 index 04399cca8f63e..0000000000000 --- a/changelog/20725.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -auth/gcp: Updated plugin from v0.15.0 to v0.16.0 -``` diff --git a/changelog/20731.txt b/changelog/20731.txt deleted file mode 100644 index 1896c199add9c..0000000000000 --- a/changelog/20731.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: fixes auto_rotate_period ttl input for transit keys -``` diff --git a/changelog/20736.txt b/changelog/20736.txt deleted file mode 100644 index 1c4c3d4d256e0..0000000000000 --- a/changelog/20736.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/transit: Support BYOK-encrypted export of keys to securely allow synchronizing specific keys and version across clusters. 
-``` diff --git a/changelog/20741.txt b/changelog/20741.txt deleted file mode 100644 index 8034e456e0c67..0000000000000 --- a/changelog/20741.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: Add integration tests for agent running in process supervisor mode -``` diff --git a/changelog/20742.txt b/changelog/20742.txt deleted file mode 100644 index d91237e1d3912..0000000000000 --- a/changelog/20742.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/mongodbatlas: Updated plugin from v0.9.1 to v0.10.0 -``` diff --git a/changelog/20745.txt b/changelog/20745.txt deleted file mode 100644 index 57a4391ba22de..0000000000000 --- a/changelog/20745.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -auth/centrify: Updated plugin from v0.14.0 to v0.15.1 -``` diff --git a/changelog/20747.txt b/changelog/20747.txt deleted file mode 100644 index 4c600d203fb3a..0000000000000 --- a/changelog/20747.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Add filtering by auth type and auth name to the Authentication Method list view. -``` diff --git a/changelog/20750.txt b/changelog/20750.txt deleted file mode 100644 index 75a3e1da364e9..0000000000000 --- a/changelog/20750.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/ad: Updated plugin from v0.10.1-0.20230329210417-0b2cdb26cf5d to v0.16.0 -``` \ No newline at end of file diff --git a/changelog/20751.txt b/changelog/20751.txt deleted file mode 100644 index 9b78b3dfe5a29..0000000000000 --- a/changelog/20751.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -database/redis-elasticache: Updated plugin from v0.2.0 to v0.2.1 -``` diff --git a/changelog/20752.txt b/changelog/20752.txt deleted file mode 100644 index 667bc37f37b38..0000000000000 --- a/changelog/20752.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**Vault PKI ACME Server**: Support for the ACME certificate lifecycle management protocol has been added to the Vault PKI Plugin. 
This allows standard ACME clients, such as the EFF's certbot and the CNCF's k8s cert-manager, to request certificates from a Vault server with no knowledge of Vault APIs or authentication mechanisms. For public-facing Vault instances, we recommend requiring External Account Bindings (EAB) to limit the ability to request certificates to only authenticated clients. -``` diff --git a/changelog/20758.txt b/changelog/20758.txt deleted file mode 100644 index 7eed0b075191a..0000000000000 --- a/changelog/20758.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -auth/alicloud: Updated plugin from v0.14.0 to v0.15.0 -``` \ No newline at end of file diff --git a/changelog/20763.txt b/changelog/20763.txt deleted file mode 100644 index 311dcb0a62f42..0000000000000 --- a/changelog/20763.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -database/redis: Upgrade plugin dependencies -``` diff --git a/changelog/20764.txt b/changelog/20764.txt deleted file mode 100644 index adc14e07f152f..0000000000000 --- a/changelog/20764.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -database/couchbase: Updated plugin from v0.9.0 to v0.9.2 -``` diff --git a/changelog/20767.txt b/changelog/20767.txt deleted file mode 100644 index b6d853a63903f..0000000000000 --- a/changelog/20767.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -database/elasticsearch: Upgrade plugin dependencies -``` diff --git a/changelog/20771.txt b/changelog/20771.txt deleted file mode 100644 index 5cc1ee2d472c8..0000000000000 --- a/changelog/20771.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -auth/kerberos: Enable plugin multiplexing -auth/kerberos: Upgrade plugin dependencies -``` diff --git a/changelog/20777.txt b/changelog/20777.txt deleted file mode 100644 index ec3c9e42b58ba..0000000000000 --- a/changelog/20777.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/aure: Updated plugin from v0.15.0 to v0.16.0 -``` \ No newline at end of file diff 
--git a/changelog/20783.txt b/changelog/20783.txt deleted file mode 100644 index 372d36cb7b1e4..0000000000000 --- a/changelog/20783.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. -``` \ No newline at end of file diff --git a/changelog/20784.txt b/changelog/20784.txt deleted file mode 100644 index b24a857a20020..0000000000000 --- a/changelog/20784.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -secrets/gcpkms: Enable plugin multiplexing -secrets/gcpkms: Upgrade plugin dependencies -``` diff --git a/changelog/20787.txt b/changelog/20787.txt deleted file mode 100644 index a69b90d7de82b..0000000000000 --- a/changelog/20787.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/alicloud: Updated plugin from v0.5.4-beta1.0.20230330124709-3fcfc5914a22 to v0.15.0 -``` \ No newline at end of file diff --git a/changelog/20799.txt b/changelog/20799.txt deleted file mode 100644 index 2e17ff921d7b5..0000000000000 --- a/changelog/20799.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -auth/jwt: Updated plugin from v0.15.0 to v0.16.0 -``` diff --git a/changelog/20802.txt b/changelog/20802.txt deleted file mode 100644 index de8e1b90dc068..0000000000000 --- a/changelog/20802.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:change -secrets/kubernetes: Update plugin to v0.5.0 -``` -```release-note:change -auth/kubernetes: Update plugin to v0.16.0 -``` diff --git a/changelog/20807.txt b/changelog/20807.txt deleted file mode 100644 index 3a3c1f4cdad31..0000000000000 --- a/changelog/20807.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/database/snowflake: Updated plugin from v0.7.0 to v0.8.0 -``` \ No newline at end of file diff --git a/changelog/20816.txt b/changelog/20816.txt deleted file mode 100644 index aae4b59c48dc1..0000000000000 --- a/changelog/20816.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change 
-auth/azure: Updated plugin from v0.13.0 to v0.15.0 -``` diff --git a/changelog/20818.txt b/changelog/20818.txt deleted file mode 100644 index 885ee92ce8aa4..0000000000000 --- a/changelog/20818.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/gcp: Updated plugin from v0.15.0 to v0.16.0 -``` diff --git a/changelog/20825.txt b/changelog/20825.txt deleted file mode 100644 index da993696b0480..0000000000000 --- a/changelog/20825.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. -``` \ No newline at end of file diff --git a/changelog/20834.txt b/changelog/20834.txt deleted file mode 100644 index f17f1d326b584..0000000000000 --- a/changelog/20834.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -core: Remove feature toggle for SSCTs, i.e. the env var VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS. -``` \ No newline at end of file diff --git a/changelog/20882.txt b/changelog/20882.txt deleted file mode 100644 index 3694468641da6..0000000000000 --- a/changelog/20882.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:change -secrets/database/mongodbatlas: Updated plugin from v0.9.0 to v0.10.0 -``` -```release-note:feature -**MongoDB Atlas Database Secrets**: Adds support for generating X.509 certificates on dynamic roles for user authentication -``` \ No newline at end of file diff --git a/changelog/20891.txt b/changelog/20891.txt deleted file mode 100644 index 3057ec56f40dc..0000000000000 --- a/changelog/20891.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -secrets/consul: Improve error message when ACL bootstrapping fails. 
-``` - diff --git a/changelog/20907.txt b/changelog/20907.txt deleted file mode 100644 index 3f13a659de2b2..0000000000000 --- a/changelog/20907.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: fixes key_bits and signature_bits reverting to default values when editing a pki role -``` \ No newline at end of file diff --git a/changelog/20933.txt b/changelog/20933.txt deleted file mode 100644 index 580475e2b5d52..0000000000000 --- a/changelog/20933.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: remove unnecessary *BarrierView field from backendEntry struct -``` \ No newline at end of file diff --git a/changelog/20934.txt b/changelog/20934.txt deleted file mode 100644 index 72c22574d615a..0000000000000 --- a/changelog/20934.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -agent: Fix bug with 'cache' stanza validation -``` diff --git a/changelog/20943.txt b/changelog/20943.txt deleted file mode 100644 index 7cf186d18420d..0000000000000 --- a/changelog/20943.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Support TLS-ALPN-01 challenge type in ACME for DNS certificate identifiers. 
-``` diff --git a/changelog/20981.txt b/changelog/20981.txt deleted file mode 100644 index 26a5304c5d3da..0000000000000 --- a/changelog/20981.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Limit ACME issued certificates NotAfter TTL to a maximum of 90 days -``` diff --git a/changelog/20995.txt b/changelog/20995.txt deleted file mode 100644 index 76653d4d54334..0000000000000 --- a/changelog/20995.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -api: adding a new api sys method for replication status -``` diff --git a/changelog/21010.txt b/changelog/21010.txt deleted file mode 100644 index bcd218794df9c..0000000000000 --- a/changelog/21010.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. -``` \ No newline at end of file diff --git a/changelog/21100.txt b/changelog/21100.txt deleted file mode 100644 index 50024c9c2d3a4..0000000000000 --- a/changelog/21100.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:bug -replication (enterprise): Fix regression causing token creation against a role -with a new entity alias to be incorrectly forwarded from perf standbys. -``` diff --git a/changelog/_go-ver-1122.txt b/changelog/_go-ver-1122.txt new file mode 100644 index 0000000000000..c7744e1497577 --- /dev/null +++ b/changelog/_go-ver-1122.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.19.3. +``` diff --git a/changelog/_go-ver-1123.txt b/changelog/_go-ver-1123.txt new file mode 100644 index 0000000000000..588b90c052da3 --- /dev/null +++ b/changelog/_go-ver-1123.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.19.4. +``` diff --git a/changelog/_go-ver-1124.txt b/changelog/_go-ver-1124.txt new file mode 100644 index 0000000000000..afd57943ede15 --- /dev/null +++ b/changelog/_go-ver-1124.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.19.6. 
+``` diff --git a/changelog/_go-ver-1126.txt b/changelog/_go-ver-1126.txt new file mode 100644 index 0000000000000..7fd5dbd734289 --- /dev/null +++ b/changelog/_go-ver-1126.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.19.8. +``` diff --git a/changelog/_go-ver-1127.txt b/changelog/_go-ver-1127.txt new file mode 100644 index 0000000000000..b6c1fc18b6529 --- /dev/null +++ b/changelog/_go-ver-1127.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.19.9. +``` diff --git a/changelog/_go-ver-1130.txt b/changelog/_go-ver-1130.txt deleted file mode 100644 index c63e249c45883..0000000000000 --- a/changelog/_go-ver-1130.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -core: Bump Go version to 1.20. -``` diff --git a/changelog/_go-ver-1140.txt b/changelog/_go-ver-1140.txt deleted file mode 100644 index b57fd6478d0b6..0000000000000 --- a/changelog/_go-ver-1140.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -core: Bump Go version to 1.20.4. -``` diff --git a/changelog/pki-ui-improvements.txt b/changelog/pki-ui-improvements.txt deleted file mode 100644 index d824033f2e3c4..0000000000000 --- a/changelog/pki-ui-improvements.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**NEW PKI Workflow in UI**: Completes generally available rollout of new PKI UI that provides smoother mount configuration and a more guided user experience -``` \ No newline at end of file diff --git a/changelog/plugin-versioning.txt b/changelog/plugin-versioning.txt deleted file mode 100644 index cfd77a4778ede..0000000000000 --- a/changelog/plugin-versioning.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified. 
-``` \ No newline at end of file diff --git a/command/agent.go b/command/agent.go index 11942ce533775..b90665683cc1c 100644 --- a/command/agent.go +++ b/command/agent.go @@ -1,57 +1,59 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( "context" "crypto/tls" - "errors" "flag" "fmt" "io" + "io/ioutil" "net" "net/http" "os" + "path/filepath" "sort" "strings" "sync" "time" systemd "github.com/coreos/go-systemd/daemon" - ctconfig "github.com/hashicorp/consul-template/config" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/gatedwriter" - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/go-secure-stdlib/reloadutil" - "github.com/kr/pretty" - "github.com/mitchellh/cli" - "github.com/oklog/run" - "github.com/posener/complete" - "golang.org/x/text/cases" - "golang.org/x/text/language" - "google.golang.org/grpc/test/bufconn" - "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agent/auth/alicloud" + "github.com/hashicorp/vault/command/agent/auth/approle" + "github.com/hashicorp/vault/command/agent/auth/aws" + "github.com/hashicorp/vault/command/agent/auth/azure" + "github.com/hashicorp/vault/command/agent/auth/cert" + "github.com/hashicorp/vault/command/agent/auth/cf" + "github.com/hashicorp/vault/command/agent/auth/gcp" + "github.com/hashicorp/vault/command/agent/auth/jwt" + "github.com/hashicorp/vault/command/agent/auth/kerberos" + "github.com/hashicorp/vault/command/agent/auth/kubernetes" + "github.com/hashicorp/vault/command/agent/cache" + "github.com/hashicorp/vault/command/agent/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agent/cache/cachememdb" + "github.com/hashicorp/vault/command/agent/cache/keymanager" agentConfig "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/command/agent/exec" + 
"github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agent/sink/inmem" "github.com/hashicorp/vault/command/agent/template" - "github.com/hashicorp/vault/command/agentproxyshared" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - cache "github.com/hashicorp/vault/command/agentproxyshared/cache" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" - "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" - "github.com/hashicorp/vault/command/agentproxyshared/winsvc" - "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/command/agent/winsvc" "github.com/hashicorp/vault/helper/metricsutil" - "github.com/hashicorp/vault/helper/useragent" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/useragent" "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/version" + "github.com/hashicorp/vault/sdk/version" + "github.com/kr/pretty" + "github.com/mitchellh/cli" + "github.com/oklog/run" + "github.com/posener/complete" + "google.golang.org/grpc/test/bufconn" ) var ( @@ -59,24 +61,12 @@ var ( _ cli.CommandAutocomplete = (*AgentCommand)(nil) ) -const ( - // flagNameAgentExitAfterAuth is used as an Agent specific flag to indicate - // that agent should exit after a single successful auth - flagNameAgentExitAfterAuth = "exit-after-auth" -) - type AgentCommand struct { *BaseCommand - logFlags logFlags - - config *agentConfig.Config ShutdownCh chan struct{} SighupCh chan struct{} - tlsReloadFuncsLock sync.RWMutex - tlsReloadFuncs []reloadutil.ReloadFunc - logWriter io.Writer logGate *gatedwriter.Writer logger log.Logger @@ -86,12 +76,14 @@ type AgentCommand struct { 
cleanupGuard sync.Once - startedCh chan struct{} // for tests - reloadedCh chan struct{} // for tests + startedCh chan (struct{}) // for tests + + flagConfigs []string + flagLogLevel string + flagExitAfterAuth bool - flagConfigs []string - flagExitAfterAuth bool flagTestVerifyOnly bool + flagCombineLogs bool } func (c *AgentCommand) Synopsis() string { @@ -102,7 +94,7 @@ func (c *AgentCommand) Help() string { helpText := ` Usage: vault agent [options] - This command starts a Vault Agent that can perform automatic authentication + This command starts a Vault agent that can perform automatic authentication in certain environments. Start an agent with a configuration file: @@ -120,9 +112,6 @@ func (c *AgentCommand) Flags() *FlagSets { f := set.NewFlagSet("Command Options") - // Augment with the log flags - f.addLogFlags(&c.logFlags) - f.StringSliceVar(&StringSliceVar{ Name: "config", Target: &c.flagConfigs, @@ -134,8 +123,18 @@ func (c *AgentCommand) Flags() *FlagSets { "contain only agent directives.", }) + f.StringVar(&StringVar{ + Name: "log-level", + Target: &c.flagLogLevel, + Default: "info", + EnvVar: "VAULT_LOG_LEVEL", + Completion: complete.PredictSet("trace", "debug", "info", "warn", "error"), + Usage: "Log verbosity level. Supported values (in order of detail) are " + + "\"trace\", \"debug\", \"info\", \"warn\", and \"error\".", + }) + f.BoolVar(&BoolVar{ - Name: flagNameAgentExitAfterAuth, + Name: "exit-after-auth", Target: &c.flagExitAfterAuth, Default: false, Usage: "If set to true, the agent will exit with code 0 after a single " + @@ -150,6 +149,15 @@ func (c *AgentCommand) Flags() *FlagSets { // no warranty or backwards-compatibility promise. Do not use these flags // in production. Do not build automation using these flags. Unless you are // developing against Vault, you should not need any of these flags. + + // TODO: should the below flags be public? 
+ f.BoolVar(&BoolVar{ + Name: "combine-logs", + Target: &c.flagCombineLogs, + Default: false, + Hidden: true, + }) + f.BoolVar(&BoolVar{ Name: "test-verify-only", Target: &c.flagTestVerifyOnly, @@ -182,40 +190,119 @@ func (c *AgentCommand) Run(args []string) int { // start logging too early. c.logGate = gatedwriter.NewWriter(os.Stderr) c.logWriter = c.logGate - - if c.logFlags.flagCombineLogs { + if c.flagCombineLogs { c.logWriter = os.Stdout } + var level log.Level + c.flagLogLevel = strings.ToLower(strings.TrimSpace(c.flagLogLevel)) + switch c.flagLogLevel { + case "trace": + level = log.Trace + case "debug": + level = log.Debug + case "notice", "info", "": + level = log.Info + case "warn", "warning": + level = log.Warn + case "err", "error": + level = log.Error + default: + c.UI.Error(fmt.Sprintf("Unknown log level: %s", c.flagLogLevel)) + return 1 + } + + if c.logger == nil { + c.logger = logging.NewVaultLoggerWithWriter(c.logWriter, level) + } // Validation - if len(c.flagConfigs) < 1 { - c.UI.Error("Must specify exactly at least one config path using -config") + if len(c.flagConfigs) != 1 { + c.UI.Error("Must specify exactly one config path using -config") return 1 } - config, err := c.loadConfig(c.flagConfigs) + // Load the configuration + config, err := agentConfig.LoadConfig(c.flagConfigs[0]) if err != nil { - c.outputErrors(err) + c.UI.Error(fmt.Sprintf("Error loading configuration from %s: %s", c.flagConfigs[0], err)) return 1 } + // Ensure at least one config was found. + if config == nil { + c.UI.Output(wrapAtLength( + "No configuration read. 
Please provide the configuration with the " + + "-config flag.")) + return 1 + } + if config.AutoAuth == nil && config.Cache == nil { + c.UI.Error("No auto_auth or cache block found in config file") + return 1 + } if config.AutoAuth == nil { - c.UI.Info("No auto_auth block found in config, the automatic authentication feature will not be started") + c.UI.Info("No auto_auth block found in config file, not starting automatic authentication feature") } - c.applyConfigOverrides(f, config) // This only needs to happen on start-up to aggregate config from flags and env vars - c.config = config + exitAfterAuth := config.ExitAfterAuth + f.Visit(func(fl *flag.Flag) { + if fl.Name == "exit-after-auth" { + exitAfterAuth = c.flagExitAfterAuth + } + }) - l, err := c.newLogger() - if err != nil { - c.outputErrors(err) - return 1 - } - c.logger = l + c.setStringFlag(f, config.Vault.Address, &StringVar{ + Name: flagNameAddress, + Target: &c.flagAddress, + Default: "https://127.0.0.1:8200", + EnvVar: api.EnvVaultAddress, + }) + config.Vault.Address = c.flagAddress + c.setStringFlag(f, config.Vault.CACert, &StringVar{ + Name: flagNameCACert, + Target: &c.flagCACert, + Default: "", + EnvVar: api.EnvVaultCACert, + }) + config.Vault.CACert = c.flagCACert + c.setStringFlag(f, config.Vault.CAPath, &StringVar{ + Name: flagNameCAPath, + Target: &c.flagCAPath, + Default: "", + EnvVar: api.EnvVaultCAPath, + }) + config.Vault.CAPath = c.flagCAPath + c.setStringFlag(f, config.Vault.ClientCert, &StringVar{ + Name: flagNameClientCert, + Target: &c.flagClientCert, + Default: "", + EnvVar: api.EnvVaultClientCert, + }) + config.Vault.ClientCert = c.flagClientCert + c.setStringFlag(f, config.Vault.ClientKey, &StringVar{ + Name: flagNameClientKey, + Target: &c.flagClientKey, + Default: "", + EnvVar: api.EnvVaultClientKey, + }) + config.Vault.ClientKey = c.flagClientKey + c.setBoolFlag(f, config.Vault.TLSSkipVerify, &BoolVar{ + Name: flagNameTLSSkipVerify, + Target: &c.flagTLSSkipVerify, + Default: 
false, + EnvVar: api.EnvVaultSkipVerify, + }) + config.Vault.TLSSkipVerify = c.flagTLSSkipVerify + c.setStringFlag(f, config.Vault.TLSServerName, &StringVar{ + Name: flagTLSServerName, + Target: &c.flagTLSServerName, + Default: "", + EnvVar: api.EnvVaultTLSServerName, + }) + config.Vault.TLSServerName = c.flagTLSServerName infoKeys := make([]string, 0, 10) info := make(map[string]string) - info["log level"] = config.LogLevel + info["log level"] = c.flagLogLevel infoKeys = append(infoKeys, "log level") infoKeys = append(infoKeys, "version") @@ -237,14 +324,14 @@ func (c *AgentCommand) Run(args []string) int { if os.Getenv("VAULT_TEST_VERIFY_ONLY_DUMP_CONFIG") != "" { c.UI.Output(fmt.Sprintf( "\nConfiguration:\n%s\n", - pretty.Sprint(*c.config))) + pretty.Sprint(*config))) } return 0 } - // Ignore any setting of Agent's address. This client is used by the Agent + // Ignore any setting of agent's address. This client is used by the agent // to reach out to Vault. This should never loop back to agent. - c.flagAgentProxyAddress = "" + c.flagAgentAddress = "" client, err := c.Client() if err != nil { c.UI.Error(fmt.Sprintf( @@ -253,28 +340,7 @@ func (c *AgentCommand) Run(args []string) int { return 1 } - serverHealth, err := client.Sys().Health() - if err == nil { - // We don't exit on error here, as this is not worth stopping Agent over - serverVersion := serverHealth.Version - agentVersion := version.GetVersion().VersionNumber() - if serverVersion != agentVersion { - c.UI.Info("==> Note: Vault Agent version does not match Vault server version. " + - fmt.Sprintf("Vault Agent version: %s, Vault server version: %s", agentVersion, serverVersion)) - } - } - - if config.IsDefaultListerDefined() { - // Notably, we cannot know for sure if they are using the API proxy functionality unless - // we log on each API proxy call, which would be too noisy. - // A customer could have a listener defined but only be using e.g. 
the cache-clear API, - // even though the API proxy is something they have available. - c.UI.Warn("==> Note: Vault Agent will be deprecating API proxy functionality in a future " + - "release, and this functionality has moved to a new subcommand, vault proxy. If you rely on this " + - "functionality, plan to move to Vault Proxy instead.") - } - - // ctx and cancelFunc are passed to the AuthHandler, SinkServer, ExecServer and + // ctx and cancelFunc are passed to the AuthHandler, SinkServer, and // TemplateServer that periodically listen for ctx.Done() to fire and shut // down accordingly. ctx, cancelFunc := context.WithCancel(context.Background()) @@ -286,7 +352,7 @@ func (c *AgentCommand) Run(args []string) int { Ui: c.UI, ServiceName: "vault", DisplayName: "Vault", - UserAgent: useragent.AgentString(), + UserAgent: useragent.String(), ClusterName: config.ClusterName, }) if err != nil { @@ -333,7 +399,7 @@ func (c *AgentCommand) Run(args []string) int { } s, err := file.NewFileSink(config) if err != nil { - c.UI.Error(fmt.Errorf("error creating file sink: %w", err).Error()) + c.UI.Error(fmt.Errorf("Error creating file sink: %w", err).Error()) return 1 } config.Sink = s @@ -349,9 +415,35 @@ func (c *AgentCommand) Run(args []string) int { MountPath: config.AutoAuth.Method.MountPath, Config: config.AutoAuth.Method.Config, } - method, err = agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) + switch config.AutoAuth.Method.Type { + case "alicloud": + method, err = alicloud.NewAliCloudAuthMethod(authConfig) + case "aws": + method, err = aws.NewAWSAuthMethod(authConfig) + case "azure": + method, err = azure.NewAzureAuthMethod(authConfig) + case "cert": + method, err = cert.NewCertAuthMethod(authConfig) + case "cf": + method, err = cf.NewCFAuthMethod(authConfig) + case "gcp": + method, err = gcp.NewGCPAuthMethod(authConfig) + case "jwt": + method, err = jwt.NewJWTAuthMethod(authConfig) + case "kerberos": + method, 
err = kerberos.NewKerberosAuthMethod(authConfig) + case "kubernetes": + method, err = kubernetes.NewKubernetesAuthMethod(authConfig) + case "approle": + method, err = approle.NewApproleAuthMethod(authConfig) + case "pcf": // Deprecated. + method, err = cf.NewCFAuthMethod(authConfig) + default: + c.UI.Error(fmt.Sprintf("Unknown auth method %q", config.AutoAuth.Method.Type)) + return 1 + } if err != nil { - c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) + c.UI.Error(fmt.Errorf("Error creating %s auth method: %w", config.AutoAuth.Method.Type, err).Error()) return 1 } } @@ -360,47 +452,15 @@ func (c *AgentCommand) Run(args []string) int { // confuse the issue of retries for auth failures which have their own // config and are handled a bit differently. if os.Getenv(api.EnvVaultMaxRetries) == "" { - client.SetMaxRetries(ctconfig.DefaultRetryAttempts) - if config.Vault != nil { - if config.Vault.Retry != nil { - client.SetMaxRetries(config.Vault.Retry.NumRetries) - } - } + client.SetMaxRetries(config.Vault.Retry.NumRetries) } enforceConsistency := cache.EnforceConsistencyNever whenInconsistent := cache.WhenInconsistentFail - if config.APIProxy != nil { - switch config.APIProxy.EnforceConsistency { - case "always": - enforceConsistency = cache.EnforceConsistencyAlways - case "never", "": - default: - c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for enforce_consistency: %q", config.APIProxy.EnforceConsistency)) - return 1 - } - - switch config.APIProxy.WhenInconsistent { - case "retry": - whenInconsistent = cache.WhenInconsistentRetry - case "forward": - whenInconsistent = cache.WhenInconsistentForward - case "fail", "": - default: - c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for when_inconsistent: %q", config.APIProxy.WhenInconsistent)) - return 1 - } - } - // Keep Cache configuration for legacy reasons, but error if defined alongside API Proxy if config.Cache != nil { switch config.Cache.EnforceConsistency { 
case "always": - if enforceConsistency != cache.EnforceConsistencyNever { - c.UI.Error("enforce_consistency configured in both api_proxy and cache blocks. Please remove this configuration from the cache block.") - return 1 - } else { - enforceConsistency = cache.EnforceConsistencyAlways - } + enforceConsistency = cache.EnforceConsistencyAlways case "never", "": default: c.UI.Error(fmt.Sprintf("Unknown cache setting for enforce_consistency: %q", config.Cache.EnforceConsistency)) @@ -409,19 +469,9 @@ func (c *AgentCommand) Run(args []string) int { switch config.Cache.WhenInconsistent { case "retry": - if whenInconsistent != cache.WhenInconsistentFail { - c.UI.Error("when_inconsistent configured in both api_proxy and cache blocks. Please remove this configuration from the cache block.") - return 1 - } else { - whenInconsistent = cache.WhenInconsistentRetry - } + whenInconsistent = cache.WhenInconsistentRetry case "forward": - if whenInconsistent != cache.WhenInconsistentFail { - c.UI.Error("when_inconsistent configured in both api_proxy and cache blocks. Please remove this configuration from the cache block.") - return 1 - } else { - whenInconsistent = cache.WhenInconsistentForward - } + whenInconsistent = cache.WhenInconsistentForward case "fail", "": default: c.UI.Error(fmt.Sprintf("Unknown cache setting for when_inconsistent: %q", config.Cache.WhenInconsistent)) @@ -446,46 +496,41 @@ func (c *AgentCommand) Run(args []string) int { } // Output the header that the agent has started - if !c.logFlags.flagCombineLogs { - c.UI.Output("==> Vault Agent started! Log data will stream in below:\n") + if !c.flagCombineLogs { + c.UI.Output("==> Vault agent started! 
Log data will stream in below:\n") } var leaseCache *cache.LeaseCache var previousToken string + // Parse agent listener configurations + if config.Cache != nil { + cacheLogger := c.logger.Named("cache") - proxyClient, err := client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for proxying: %v", err)) - return 1 - } - - if config.DisableIdleConnsAPIProxy { - proxyClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAPIProxy { - proxyClient.SetDisableKeepAlives(true) - } + proxyClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for caching: %v", err)) + return 1 + } - apiProxyLogger := c.logger.Named("apiproxy") + if config.DisableIdleConnsCaching { + proxyClient.SetMaxIdleConnections(-1) + } - // The API proxy to be used, if listeners are configured - apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ - Client: proxyClient, - Logger: apiProxyLogger, - EnforceConsistency: enforceConsistency, - WhenInconsistentAction: whenInconsistent, - UserAgentStringFunction: useragent.AgentProxyStringWithProxiedUserAgent, - UserAgentString: useragent.AgentProxyString(), - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) - return 1 - } + if config.DisableKeepAlivesCaching { + proxyClient.SetDisableKeepAlives(true) + } - // Parse agent cache configurations - if config.Cache != nil { - cacheLogger := c.logger.Named("cache") + // Create the API proxier + apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ + Client: proxyClient, + Logger: cacheLogger.Named("apiproxy"), + EnforceConsistency: enforceConsistency, + WhenInconsistentAction: whenInconsistent, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) + return 1 + } // Create the lease cache proxier and set its underlying proxier to // the API proxier. 
@@ -502,175 +547,272 @@ func (c *AgentCommand) Run(args []string) int { // Configure persistent storage and add to LeaseCache if config.Cache.Persist != nil { - deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) + if config.Cache.Persist.Path == "" { + c.UI.Error("must specify persistent cache path") return 1 } - previousToken = oldToken - if deferFunc != nil { - defer deferFunc() + + // Set AAD based on key protection type + var aad string + switch config.Cache.Persist.Type { + case "kubernetes": + aad, err = getServiceAccountJWT(config.Cache.Persist.ServiceAccountTokenFile) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to read service account token from %s: %s", config.Cache.Persist.ServiceAccountTokenFile, err)) + return 1 + } + default: + c.UI.Error(fmt.Sprintf("persistent key protection type %q not supported", config.Cache.Persist.Type)) + return 1 } - } - } - var listeners []net.Listener + // Check if bolt file exists already + dbFileExists, err := cacheboltdb.DBFileExists(config.Cache.Persist.Path) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to check if bolt file exists at path %s: %s", config.Cache.Persist.Path, err)) + return 1 + } + if dbFileExists { + // Open the bolt file, but wait to setup Encryption + ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ + Path: config.Cache.Persist.Path, + Logger: cacheLogger.Named("cacheboltdb"), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error opening persistent cache: %v", err)) + return 1 + } - // If there are templates, add an in-process listener - if len(config.Templates) > 0 || len(config.EnvTemplates) > 0 { - config.Listeners = append(config.Listeners, &configutil.Listener{Type: listenerutil.BufConnType}) - } + // Get the token from bolt for retrieving the encryption key, + // then setup encryption so that 
restore is possible + token, err := ps.GetRetrievalToken() + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting retrieval token from persistent cache: %v", err)) + } - // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. - c.tlsReloadFuncsLock.Lock() + if err := ps.Close(); err != nil { + c.UI.Warn(fmt.Sprintf("Failed to close persistent cache file after getting retrieval token: %s", err)) + } - for i, lnConfig := range config.Listeners { - var ln net.Listener - var tlsCfg *tls.Config + km, err := keymanager.NewPassthroughKeyManager(ctx, token) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to configure persistence encryption for cache: %s", err)) + return 1 + } - if lnConfig.Type == listenerutil.BufConnType { - inProcListener := bufconn.Listen(1024 * 1024) - if config.Cache != nil { - config.Cache.InProcDialer = listenerutil.NewBufConnWrapper(inProcListener) + // Open the bolt file with the wrapper provided + ps, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ + Path: config.Cache.Persist.Path, + Logger: cacheLogger.Named("cacheboltdb"), + Wrapper: km.Wrapper(), + AAD: aad, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error opening persistent cache with wrapper: %v", err)) + return 1 + } + + // Restore anything in the persistent cache to the memory cache + if err := leaseCache.Restore(ctx, ps); err != nil { + c.UI.Error(fmt.Sprintf("Error restoring in-memory cache from persisted file: %v", err)) + if config.Cache.Persist.ExitOnErr { + return 1 + } + } + cacheLogger.Info("loaded memcache from persistent storage") + + // Check for previous auto-auth token + oldTokenBytes, err := ps.GetAutoAuthToken(ctx) + if err != nil { + c.UI.Error(fmt.Sprintf("Error in fetching previous auto-auth token: %s", err)) + if config.Cache.Persist.ExitOnErr { + return 1 + } + } + if len(oldTokenBytes) > 0 { + oldToken, err := cachememdb.Deserialize(oldTokenBytes) + if err != nil { + c.UI.Error(fmt.Sprintf("Error in 
deserializing previous auto-auth token cache entry: %s", err)) + if config.Cache.Persist.ExitOnErr { + return 1 + } + } + previousToken = oldToken.Token + } + + // If keep_after_import true, set persistent storage layer in + // leaseCache, else remove db file + if config.Cache.Persist.KeepAfterImport { + defer ps.Close() + leaseCache.SetPersistentStorage(ps) + } else { + if err := ps.Close(); err != nil { + c.UI.Warn(fmt.Sprintf("failed to close persistent cache file: %s", err)) + } + dbFile := filepath.Join(config.Cache.Persist.Path, cacheboltdb.DatabaseFileName) + if err := os.Remove(dbFile); err != nil { + c.UI.Error(fmt.Sprintf("failed to remove persistent storage file %s: %s", dbFile, err)) + if config.Cache.Persist.ExitOnErr { + return 1 + } + } + } + } else { + km, err := keymanager.NewPassthroughKeyManager(ctx, nil) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to configure persistence encryption for cache: %s", err)) + return 1 + } + ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ + Path: config.Cache.Persist.Path, + Logger: cacheLogger.Named("cacheboltdb"), + Wrapper: km.Wrapper(), + AAD: aad, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) + return 1 + } + cacheLogger.Info("configured persistent storage", "path", config.Cache.Persist.Path) + + // Stash the key material in bolt + token, err := km.RetrievalToken(ctx) + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting persistent key: %s", err)) + return 1 + } + if err := ps.StoreRetrievalToken(token); err != nil { + c.UI.Error(fmt.Sprintf("Error setting key in persistent cache: %v", err)) + return 1 + } + + defer ps.Close() + leaseCache.SetPersistentStorage(ps) } - ln = inProcListener - } else { - lnBundle, err := cache.StartListener(lnConfig) + } + + var inmemSink sink.Sink + if config.Cache.UseAutoAuthToken { + cacheLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") + inmemSink, err = 
inmem.New(&sink.SinkConfig{ + Logger: cacheLogger, + }, leaseCache) if err != nil { - c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) + c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) return 1 } + sinks = append(sinks, &sink.SinkConfig{ + Logger: cacheLogger, + Sink: inmemSink, + }) + } - tlsCfg = lnBundle.TLSConfig - ln = lnBundle.Listener + proxyVaultToken := !config.Cache.ForceAutoAuthToken - // Track the reload func, so we can reload later if needed. - c.tlsReloadFuncs = append(c.tlsReloadFuncs, lnBundle.TLSReloadFunc) - } + // Create the request handler + cacheHandler := cache.Handler(ctx, cacheLogger, leaseCache, inmemSink, proxyVaultToken) - listeners = append(listeners, ln) + var listeners []net.Listener - proxyVaultToken := true - var inmemSink sink.Sink - if config.APIProxy != nil { - if config.APIProxy.UseAutoAuthToken { - apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") - inmemSink, err = inmem.New(&sink.SinkConfig{ - Logger: apiProxyLogger, - }, leaseCache) + // If there are templates, add an in-process listener + if len(config.Templates) > 0 { + config.Listeners = append(config.Listeners, &configutil.Listener{Type: listenerutil.BufConnType}) + } + for i, lnConfig := range config.Listeners { + var ln net.Listener + var tlsConf *tls.Config + + if lnConfig.Type == listenerutil.BufConnType { + inProcListener := bufconn.Listen(1024 * 1024) + config.Cache.InProcDialer = listenerutil.NewBufConnWrapper(inProcListener) + ln = inProcListener + } else { + ln, tlsConf, err = cache.StartListener(lnConfig) if err != nil { - c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) return 1 } - sinks = append(sinks, &sink.SinkConfig{ - Logger: apiProxyLogger, - Sink: inmemSink, - }) } - proxyVaultToken = !config.APIProxy.ForceAutoAuthToken - } - var muxHandler http.Handler - if leaseCache != nil { - muxHandler = 
cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) - } else { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) - } + listeners = append(listeners, ln) - // Parse 'require_request_header' listener config option, and wrap - // the request handler if necessary - if lnConfig.RequireRequestHeader && ("metrics_only" != lnConfig.Role) { - muxHandler = verifyRequestHeader(muxHandler) - } + // Parse 'require_request_header' listener config option, and wrap + // the request handler if necessary + muxHandler := cacheHandler + if lnConfig.RequireRequestHeader { + muxHandler = verifyRequestHeader(muxHandler) + } - // Create a muxer and add paths relevant for the lease cache layer - mux := http.NewServeMux() - quitEnabled := lnConfig.AgentAPI != nil && lnConfig.AgentAPI.EnableQuit + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + quitEnabled := lnConfig.AgentAPI != nil && lnConfig.AgentAPI.EnableQuit - mux.Handle(consts.AgentPathMetrics, c.handleMetrics()) - if "metrics_only" != lnConfig.Role { mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) mux.Handle(consts.AgentPathQuit, c.handleQuit(quitEnabled)) + mux.Handle(consts.AgentPathMetrics, c.handleMetrics()) mux.Handle("/", muxHandler) - } - - scheme := "https://" - if tlsCfg == nil { - scheme = "http://" - } - if ln.Addr().Network() == "unix" { - scheme = "unix://" - } - infoKey := fmt.Sprintf("api address %d", i+1) - info[infoKey] = scheme + ln.Addr().String() - infoKeys = append(infoKeys, infoKey) - - server := &http.Server{ - Addr: ln.Addr().String(), - TLSConfig: tlsCfg, - Handler: mux, - ReadHeaderTimeout: 10 * time.Second, - ReadTimeout: 30 * time.Second, - IdleTimeout: 5 * time.Minute, - ErrorLog: apiProxyLogger.StandardLogger(nil), - } + scheme := "https://" + if tlsConf == nil { + scheme = "http://" + } + if ln.Addr().Network() == "unix" { + scheme = "unix://" + } - go 
server.Serve(ln) - } + infoKey := fmt.Sprintf("api address %d", i+1) + info[infoKey] = scheme + ln.Addr().String() + infoKeys = append(infoKeys, infoKey) + + server := &http.Server{ + Addr: ln.Addr().String(), + TLSConfig: tlsConf, + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: cacheLogger.StandardLogger(nil), + } - c.tlsReloadFuncsLock.Unlock() + go server.Serve(ln) + } - // Ensure that listeners are closed at all the exits - listenerCloseFunc := func() { - for _, ln := range listeners { - ln.Close() + // Ensure that listeners are closed at all the exits + listenerCloseFunc := func() { + for _, ln := range listeners { + ln.Close() + } } + defer c.cleanupGuard.Do(listenerCloseFunc) } - defer c.cleanupGuard.Do(listenerCloseFunc) // Inform any tests that the server is ready if c.startedCh != nil { close(c.startedCh) } - var g run.Group + // Listen for signals + // TODO: implement support for SIGHUP reloading of configuration + // signal.Notify(c.signalCh) - g.Add(func() error { - for { - select { - case <-c.SighupCh: - c.UI.Output("==> Vault Agent config reload triggered") - err := c.reloadConfig(c.flagConfigs) - if err != nil { - c.outputErrors(err) - } - // Send the 'reloaded' message on the relevant channel - select { - case c.reloadedCh <- struct{}{}: - default: - } - case <-ctx.Done(): - return nil - } - } - }, func(error) { - cancelFunc() - }) + var g run.Group // This run group watches for signal termination g.Add(func() error { for { select { case <-c.ShutdownCh: - c.UI.Output("==> Vault Agent shutdown triggered") + c.UI.Output("==> Vault agent shutdown triggered") // Notify systemd that the server is shutting down - // Let the lease cache know this is a shutdown; no need to evict everything + c.notifySystemd(systemd.SdNotifyStopping) + // Let the lease cache know this is a shutdown; no need to evict + // everything if leaseCache != nil { leaseCache.SetShuttingDown(true) } 
return nil case <-ctx.Done(): + c.notifySystemd(systemd.SdNotifyStopping) return nil case <-winsvc.ShutdownChannel(): return nil @@ -680,8 +822,7 @@ func (c *AgentCommand) Run(args []string) int { // Start auto-auth and sink servers if method != nil { - enableTemplateTokenCh := len(config.Templates) > 0 - enableEnvTemplateTokenCh := len(config.EnvTemplates) > 0 + enableTokenCh := len(config.Templates) > 0 // Auth Handler is going to set its own retry values, so we want to // work on a copy of the client to not affect other subsystems. @@ -706,35 +847,24 @@ func (c *AgentCommand) Run(args []string) int { MinBackoff: config.AutoAuth.Method.MinBackoff, MaxBackoff: config.AutoAuth.Method.MaxBackoff, EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, - EnableTemplateTokenCh: enableTemplateTokenCh, - EnableExecTokenCh: enableEnvTemplateTokenCh, + EnableTemplateTokenCh: enableTokenCh, Token: previousToken, ExitOnError: config.AutoAuth.Method.ExitOnError, - UserAgent: useragent.AgentAutoAuthString(), - MetricsSignifier: "agent", }) ss := sink.NewSinkServer(&sink.SinkServerConfig{ Logger: c.logger.Named("sink.server"), Client: ahClient, - ExitAfterAuth: config.ExitAfterAuth, + ExitAfterAuth: exitAfterAuth, }) ts := template.NewServer(&template.ServerConfig{ Logger: c.logger.Named("template.server"), - LogLevel: c.logger.GetLevel(), + LogLevel: level, LogWriter: c.logWriter, - AgentConfig: c.config, + AgentConfig: config, Namespace: templateNamespace, - ExitAfterAuth: config.ExitAfterAuth, - }) - - es := exec.NewServer(&exec.ServerConfig{ - AgentConfig: c.config, - Namespace: templateNamespace, - Logger: c.logger.Named("exec.server"), - LogLevel: c.logger.GetLevel(), - LogWriter: c.logWriter, + ExitAfterAuth: exitAfterAuth, }) g.Add(func() error { @@ -791,29 +921,17 @@ func (c *AgentCommand) Run(args []string) int { ts.Stop() }) - g.Add(func() error { - return es.Run(ctx, ah.ExecTokenCh) - }, func(err error) { - // Let the lease cache know this is a 
shutdown; no need to evict - // everything - if leaseCache != nil { - leaseCache.SetShuttingDown(true) - } - cancelFunc() - }) - } // Server configuration output padding := 24 sort.Strings(infoKeys) - caser := cases.Title(language.English) - c.UI.Output("==> Vault Agent configuration:\n") + c.UI.Output("==> Vault agent configuration:\n") for _, k := range infoKeys { c.UI.Output(fmt.Sprintf( "%s%s: %s", strings.Repeat(" ", padding-len(k)), - caser.String(k), + strings.Title(k), info[k])) } c.UI.Output("") @@ -836,92 +954,13 @@ func (c *AgentCommand) Run(args []string) int { } }() - var exitCode int if err := g.Run(); err != nil { - var processExitError *exec.ProcessExitError - if errors.As(err, &processExitError) { - exitCode = processExitError.ExitCode - } else { - exitCode = 1 - } - - if exitCode != 0 { - c.logger.Error("runtime error encountered", "error", err, "exitCode", exitCode) - c.UI.Error("Error encountered during run, refer to logs for more details.") - } - } - - c.notifySystemd(systemd.SdNotifyStopping) - - return exitCode -} - -// applyConfigOverrides ensures that the config object accurately reflects the desired -// settings as configured by the user. It applies the relevant config setting based -// on the precedence (env var overrides file config, cli overrides env var). -// It mutates the config object supplied. 
-func (c *AgentCommand) applyConfigOverrides(f *FlagSets, config *agentConfig.Config) { - if config.Vault == nil { - config.Vault = &agentConfig.Vault{} + c.logger.Error("runtime error encountered", "error", err) + c.UI.Error("Error encountered during run, refer to logs for more details.") + return 1 } - f.applyLogConfigOverrides(config.SharedConfig) - - f.Visit(func(fl *flag.Flag) { - if fl.Name == flagNameAgentExitAfterAuth { - config.ExitAfterAuth = c.flagExitAfterAuth - } - }) - - c.setStringFlag(f, config.Vault.Address, &StringVar{ - Name: flagNameAddress, - Target: &c.flagAddress, - Default: "https://127.0.0.1:8200", - EnvVar: api.EnvVaultAddress, - }) - config.Vault.Address = c.flagAddress - c.setStringFlag(f, config.Vault.CACert, &StringVar{ - Name: flagNameCACert, - Target: &c.flagCACert, - Default: "", - EnvVar: api.EnvVaultCACert, - }) - config.Vault.CACert = c.flagCACert - c.setStringFlag(f, config.Vault.CAPath, &StringVar{ - Name: flagNameCAPath, - Target: &c.flagCAPath, - Default: "", - EnvVar: api.EnvVaultCAPath, - }) - config.Vault.CAPath = c.flagCAPath - c.setStringFlag(f, config.Vault.ClientCert, &StringVar{ - Name: flagNameClientCert, - Target: &c.flagClientCert, - Default: "", - EnvVar: api.EnvVaultClientCert, - }) - config.Vault.ClientCert = c.flagClientCert - c.setStringFlag(f, config.Vault.ClientKey, &StringVar{ - Name: flagNameClientKey, - Target: &c.flagClientKey, - Default: "", - EnvVar: api.EnvVaultClientKey, - }) - config.Vault.ClientKey = c.flagClientKey - c.setBoolFlag(f, config.Vault.TLSSkipVerify, &BoolVar{ - Name: flagNameTLSSkipVerify, - Target: &c.flagTLSSkipVerify, - Default: false, - EnvVar: api.EnvVaultSkipVerify, - }) - config.Vault.TLSSkipVerify = c.flagTLSSkipVerify - c.setStringFlag(f, config.Vault.TLSServerName, &StringVar{ - Name: flagTLSServerName, - Target: &c.flagTLSServerName, - Default: "", - EnvVar: api.EnvVaultTLSServerName, - }) - config.Vault.TLSServerName = c.flagTLSServerName + return 0 } // verifyRequestHeader 
wraps an http.Handler inside a Handler that checks for @@ -1031,6 +1070,19 @@ func (c *AgentCommand) removePidFile(pidPath string) error { return os.Remove(pidPath) } +// GetServiceAccountJWT reads the service account jwt from `tokenFile`. Default is +// the default service account file path in kubernetes. +func getServiceAccountJWT(tokenFile string) (string, error) { + if len(tokenFile) == 0 { + tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + token, err := ioutil.ReadFile(tokenFile) + if err != nil { + return "", err + } + return strings.TrimSpace(string(token)), nil +} + func (c *AgentCommand) handleMetrics() http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { @@ -1056,7 +1108,7 @@ func (c *AgentCommand) handleMetrics() http.Handler { w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) switch v := resp.Data[logical.HTTPRawBody].(type) { case string: - w.WriteHeader(status) + w.WriteHeader((status)) w.Write([]byte(v)) case []byte: w.WriteHeader(status) @@ -1085,163 +1137,3 @@ func (c *AgentCommand) handleQuit(enabled bool) http.Handler { close(c.ShutdownCh) }) } - -// newLogger creates a logger based on parsed config field on the Agent Command struct. 
-func (c *AgentCommand) newLogger() (log.InterceptLogger, error) { - if c.config == nil { - return nil, fmt.Errorf("cannot create logger, no config") - } - - var errors error - - // Parse all the log related config - logLevel, err := logging.ParseLogLevel(c.config.LogLevel) - if err != nil { - errors = multierror.Append(errors, err) - } - - logFormat, err := logging.ParseLogFormat(c.config.LogFormat) - if err != nil { - errors = multierror.Append(errors, err) - } - - logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration) - if err != nil { - errors = multierror.Append(errors, err) - } - - if errors != nil { - return nil, errors - } - - logCfg := &logging.LogConfig{ - Name: "agent", - LogLevel: logLevel, - LogFormat: logFormat, - LogFilePath: c.config.LogFile, - LogRotateDuration: logRotateDuration, - LogRotateBytes: c.config.LogRotateBytes, - LogRotateMaxFiles: c.config.LogRotateMaxFiles, - } - - l, err := logging.Setup(logCfg, c.logWriter) - if err != nil { - return nil, err - } - - return l, nil -} - -// loadConfig attempts to generate an Agent config from the file(s) specified. -func (c *AgentCommand) loadConfig(paths []string) (*agentConfig.Config, error) { - var errors error - cfg := agentConfig.NewConfig() - - for _, configPath := range paths { - configFromPath, err := agentConfig.LoadConfig(configPath) - if err != nil { - errors = multierror.Append(errors, fmt.Errorf("error loading configuration from %s: %w", configPath, err)) - } else { - cfg = cfg.Merge(configFromPath) - } - } - - if errors != nil { - return nil, errors - } - - if err := cfg.ValidateConfig(); err != nil { - return nil, fmt.Errorf("error validating configuration: %w", err) - } - - return cfg, nil -} - -// reloadConfig will attempt to reload the config from file(s) and adjust certain -// config values without requiring a restart of the Vault Agent. -// If config is retrieved without error it is stored in the config field of the AgentCommand. 
-// This operation is not atomic and could result in updated config but partially applied config settings. -// The error returned from this func may be a multierror. -// This function will most likely be called due to Vault Agent receiving a SIGHUP signal. -// Currently only reloading the following are supported: -// * log level -// * TLS certs for listeners -func (c *AgentCommand) reloadConfig(paths []string) error { - // Notify systemd that the server is reloading - c.notifySystemd(systemd.SdNotifyReloading) - defer c.notifySystemd(systemd.SdNotifyReady) - - var errors error - - // Reload the config - cfg, err := c.loadConfig(paths) - if err != nil { - // Returning single error as we won't continue with bad config and won't 'commit' it. - return err - } - c.config = cfg - - // Update the log level - err = c.reloadLogLevel() - if err != nil { - errors = multierror.Append(errors, err) - } - - // Update certs - err = c.reloadCerts() - if err != nil { - errors = multierror.Append(errors, err) - } - - return errors -} - -// reloadLogLevel will attempt to update the log level for the logger attached -// to the AgentComment struct using the value currently set in config. -func (c *AgentCommand) reloadLogLevel() error { - logLevel, err := logging.ParseLogLevel(c.config.LogLevel) - if err != nil { - return err - } - - c.logger.SetLevel(logLevel) - - return nil -} - -// reloadCerts will attempt to reload certificates using a reload func which -// was provided when the listeners were configured, only funcs that were appended -// to the AgentCommand slice will be invoked. -// This function returns a multierror type so that every func can report an error -// if it encounters one. -func (c *AgentCommand) reloadCerts() error { - var errors error - - c.tlsReloadFuncsLock.RLock() - defer c.tlsReloadFuncsLock.RUnlock() - - for _, reloadFunc := range c.tlsReloadFuncs { - // Non-TLS listeners will have a nil reload func. 
- if reloadFunc != nil { - err := reloadFunc() - if err != nil { - errors = multierror.Append(errors, err) - } - } - } - - return errors -} - -// outputErrors will take an error or multierror and handle outputting each to the UI -func (c *AgentCommand) outputErrors(err error) { - if err != nil { - if me, ok := err.(*multierror.Error); ok { - for _, err := range me.Errors { - c.UI.Error(err.Error()) - } - } else { - c.UI.Error(err.Error()) - } - } -} diff --git a/command/agent/alicloud_end_to_end_test.go b/command/agent/alicloud_end_to_end_test.go index 0f5cdfbe4ecbe..948f9fa5accf4 100644 --- a/command/agent/alicloud_end_to_end_test.go +++ b/command/agent/alicloud_end_to_end_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package agent import ( @@ -19,11 +16,10 @@ import ( uuid "github.com/hashicorp/go-uuid" vaultalicloud "github.com/hashicorp/vault-plugin-auth-alicloud" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - agentalicloud "github.com/hashicorp/vault/command/agentproxyshared/auth/alicloud" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" - "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/command/agent/auth" + agentalicloud "github.com/hashicorp/vault/command/agent/auth/alicloud" + "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -41,14 +37,6 @@ func TestAliCloudEndToEnd(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. 
- credNames := []string{ - envVarAlicloudAccessKey, - envVarAlicloudSecretKey, - envVarAlicloudRoleArn, - } - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ Logger: logger, diff --git a/command/agent/approle_end_to_end_test.go b/command/agent/approle_end_to_end_test.go index 515a13ec52f8d..35186cd8e606e 100644 --- a/command/agent/approle_end_to_end_test.go +++ b/command/agent/approle_end_to_end_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package agent import ( @@ -9,17 +6,16 @@ import ( "fmt" "io/ioutil" "os" - "strings" "testing" "time" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - agentapprole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agent/auth" + agentapprole "github.com/hashicorp/vault/command/agent/auth/approle" + "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -58,7 +54,6 @@ func TestAppRoleEndToEnd(t *testing.T) { if tc.removeSecretIDFile { secretFileAction = "remove" } - tc := tc // capture range variable t.Run(fmt.Sprintf("%s_secret_id_file bindSecretID=%v secretIDLess=%v expectToken=%v", secretFileAction, tc.bindSecretID, tc.secretIDLess, tc.expectToken), func(t *testing.T) { t.Parallel() testAppRoleEndToEnd(t, tc.removeSecretIDFile, tc.bindSecretID, tc.secretIDLess, tc.expectToken) @@ -404,52 +399,6 @@ func testAppRoleEndToEnd(t *testing.T, removeSecretIDFile bool, bindSecretID boo } } -// 
TestAppRoleLongRoleName tests that the creation of an approle is a maximum of 4096 bytes -// Prior to VAULT-8518 being fixed, you were unable to delete an approle value longer than 1024 bytes -// due to a restriction put into place by PR #14746, to prevent unbounded HMAC creation. -func TestAppRoleLongRoleName(t *testing.T) { - approleName := strings.Repeat("a", 5000) - - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - } - - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - - vault.TestWaitActive(t, cores[0].Core) - - client := cores[0].Client - - err := client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ - Type: "approle", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write(fmt.Sprintf("auth/approle/role/%s", approleName), map[string]interface{}{ - "token_ttl": "6s", - "token_max_ttl": "10s", - }) - if err != nil { - if !strings.Contains(err.Error(), "role_name is longer than maximum") { - t.Fatal(err) - } - } -} - func TestAppRoleWithWrapping(t *testing.T) { testCases := []struct { bindSecretID bool diff --git a/command/agentproxyshared/auth/alicloud/alicloud.go b/command/agent/auth/alicloud/alicloud.go similarity index 98% rename from command/agentproxyshared/auth/alicloud/alicloud.go rename to command/agent/auth/alicloud/alicloud.go index 724597682a1f3..6fc640c290e0a 100644 --- a/command/agentproxyshared/auth/alicloud/alicloud.go +++ b/command/agent/auth/alicloud/alicloud.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package alicloud import ( @@ -17,7 +14,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault-plugin-auth-alicloud/tools" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agent/auth" ) /* diff --git a/command/agentproxyshared/auth/approle/approle.go b/command/agent/auth/approle/approle.go similarity index 98% rename from command/agentproxyshared/auth/approle/approle.go rename to command/agent/auth/approle/approle.go index 9f33980a9f546..e58299ad7b2e8 100644 --- a/command/agentproxyshared/auth/approle/approle.go +++ b/command/agent/auth/approle/approle.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package approle import ( @@ -15,7 +12,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agent/auth" ) type approleMethod struct { diff --git a/command/agent/auth/auth.go b/command/agent/auth/auth.go new file mode 100644 index 0000000000000..854052adc5cbd --- /dev/null +++ b/command/agent/auth/auth.go @@ -0,0 +1,445 @@ +package auth + +import ( + "context" + "encoding/json" + "errors" + "math/rand" + "net/http" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +const ( + defaultMinBackoff = 1 * time.Second + defaultMaxBackoff = 5 * time.Minute +) + +// AuthMethod is the interface that auto-auth methods implement for the agent +// to use. +type AuthMethod interface { + // Authenticate returns a mount path, header, request body, and error. + // The header may be nil if no special header is needed. 
+ Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) + NewCreds() chan struct{} + CredSuccess() + Shutdown() +} + +// AuthMethodWithClient is an extended interface that can return an API client +// for use during the authentication call. +type AuthMethodWithClient interface { + AuthMethod + AuthClient(client *api.Client) (*api.Client, error) +} + +type AuthConfig struct { + Logger hclog.Logger + MountPath string + WrapTTL time.Duration + Config map[string]interface{} +} + +// AuthHandler is responsible for keeping a token alive and renewed and passing +// new tokens to the sink server +type AuthHandler struct { + OutputCh chan string + TemplateTokenCh chan string + token string + logger hclog.Logger + client *api.Client + random *rand.Rand + wrapTTL time.Duration + maxBackoff time.Duration + minBackoff time.Duration + enableReauthOnNewCredentials bool + enableTemplateTokenCh bool + exitOnError bool +} + +type AuthHandlerConfig struct { + Logger hclog.Logger + Client *api.Client + WrapTTL time.Duration + MaxBackoff time.Duration + MinBackoff time.Duration + Token string + EnableReauthOnNewCredentials bool + EnableTemplateTokenCh bool + ExitOnError bool +} + +func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler { + ah := &AuthHandler{ + // This is buffered so that if we try to output after the sink server + // has been shut down, during agent shutdown, we won't block + OutputCh: make(chan string, 1), + TemplateTokenCh: make(chan string, 1), + token: conf.Token, + logger: conf.Logger, + client: conf.Client, + random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + wrapTTL: conf.WrapTTL, + minBackoff: conf.MinBackoff, + maxBackoff: conf.MaxBackoff, + enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials, + enableTemplateTokenCh: conf.EnableTemplateTokenCh, + exitOnError: conf.ExitOnError, + } + + return ah +} + +func backoff(ctx context.Context, backoff *agentBackoff) bool { + if backoff.exitOnErr 
{ + return false + } + + select { + case <-time.After(backoff.current): + case <-ctx.Done(): + } + + // Increase exponential backoff for the next time if we don't + // successfully auth/renew/etc. + backoff.next() + return true +} + +func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error { + if am == nil { + return errors.New("auth handler: nil auth method") + } + + if ah.minBackoff <= 0 { + ah.minBackoff = defaultMinBackoff + } + + backoffCfg := newAgentBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError) + + if backoffCfg.min >= backoffCfg.max { + return errors.New("auth handler: min_backoff cannot be greater than max_backoff") + } + + ah.logger.Info("starting auth handler") + defer func() { + am.Shutdown() + close(ah.OutputCh) + close(ah.TemplateTokenCh) + ah.logger.Info("auth handler stopped") + }() + + credCh := am.NewCreds() + if !ah.enableReauthOnNewCredentials { + realCredCh := credCh + credCh = nil + if realCredCh != nil { + go func() { + for { + select { + case <-ctx.Done(): + return + case <-realCredCh: + } + } + }() + } + } + if credCh == nil { + credCh = make(chan struct{}) + } + + var watcher *api.LifetimeWatcher + first := true + + for { + select { + case <-ctx.Done(): + return nil + + default: + } + + var clientToUse *api.Client + var err error + var path string + var data map[string]interface{} + var header http.Header + + switch am.(type) { + case AuthMethodWithClient: + clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client) + if err != nil { + ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoff) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + + return err + } + default: + clientToUse = ah.client + } + + // Disable retry on the client to ensure our backoffOrQuit function is + // the only source of retry/backoff. 
+ clientToUse.SetMaxRetries(0) + + var secret *api.Secret = new(api.Secret) + if first && ah.token != "" { + ah.logger.Debug("using preloaded token") + + first = false + ah.logger.Debug("lookup-self with preloaded token") + clientToUse.SetToken(ah.token) + + secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx) + if err != nil { + ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + + duration, _ := secret.Data["ttl"].(json.Number).Int64() + secret.Auth = &api.SecretAuth{ + ClientToken: secret.Data["id"].(string), + LeaseDuration: int(duration), + Renewable: secret.Data["renewable"].(bool), + } + } else { + ah.logger.Info("authenticating") + + path, header, data, err = am.Authenticate(ctx, ah.client) + if err != nil { + ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + } + + if ah.wrapTTL > 0 { + wrapClient, err := clientToUse.Clone() + if err != nil { + ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + wrapClient.SetWrappingLookupFunc(func(string, string) string { + return ah.wrapTTL.String() + }) + clientToUse = wrapClient + } + for key, values := range header { + for _, value := range values { + clientToUse.AddHeader(key, value) + } + } + + // This should only happen if there's no preloaded token (regular auto-auth login) + // or if a preloaded token has expired and is now switching to auto-auth. 
+ if secret.Auth == nil { + secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data) + // Check errors/sanity + if err != nil { + ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + } + + switch { + case ah.wrapTTL > 0: + if secret.WrapInfo == nil { + ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + if secret.WrapInfo.Token == "" { + ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo) + if err != nil { + ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing") + ah.OutputCh <- string(wrappedResp) + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- string(wrappedResp) + } + + am.CredSuccess() + backoffCfg.reset() + + select { + case <-ctx.Done(): + ah.logger.Info("shutdown triggered") + continue + + case <-credCh: + ah.logger.Info("auth method found new credentials, re-authenticating") + continue + } + + default: + if secret == nil || secret.Auth == nil { + ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + if secret.Auth.ClientToken == "" { + ah.logger.Error("authentication returned empty client token", "backoff", 
backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + ah.logger.Info("authentication successful, sending token to sinks") + ah.OutputCh <- secret.Auth.ClientToken + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- secret.Auth.ClientToken + } + + am.CredSuccess() + backoffCfg.reset() + } + + if watcher != nil { + watcher.Stop() + } + + watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{ + Secret: secret, + }) + if err != nil { + ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + + // Start the renewal process + ah.logger.Info("starting renewal process") + metrics.IncrCounter([]string{"agent", "auth", "success"}, 1) + go watcher.Renew() + + LifetimeWatcherLoop: + for { + select { + case <-ctx.Done(): + ah.logger.Info("shutdown triggered, stopping lifetime watcher") + watcher.Stop() + break LifetimeWatcherLoop + + case err := <-watcher.DoneCh(): + ah.logger.Info("lifetime watcher done channel triggered") + if err != nil { + metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) + ah.logger.Error("error renewing token", "error", err) + } + break LifetimeWatcherLoop + + case <-watcher.RenewCh(): + metrics.IncrCounter([]string{"agent", "auth", "success"}, 1) + ah.logger.Info("renewed auth token") + + case <-credCh: + ah.logger.Info("auth method found new credentials, re-authenticating") + break LifetimeWatcherLoop + } + } + } +} + +// agentBackoff tracks exponential backoff state. 
+type agentBackoff struct { + min time.Duration + max time.Duration + current time.Duration + exitOnErr bool +} + +func newAgentBackoff(min, max time.Duration, exitErr bool) *agentBackoff { + if max <= 0 { + max = defaultMaxBackoff + } + + if min <= 0 { + min = defaultMinBackoff + } + + return &agentBackoff{ + current: min, + max: max, + min: min, + exitOnErr: exitErr, + } +} + +// next determines the next backoff duration that is roughly twice +// the current value, capped to a max value, with a measure of randomness. +func (b *agentBackoff) next() { + maxBackoff := 2 * b.current + + if maxBackoff > b.max { + maxBackoff = b.max + } + + // Trim a random amount (0-25%) off the doubled duration + trim := rand.Int63n(int64(maxBackoff) / 4) + b.current = maxBackoff - time.Duration(trim) +} + +func (b *agentBackoff) reset() { + b.current = b.min +} + +func (b agentBackoff) String() string { + return b.current.Truncate(10 * time.Millisecond).String() +} diff --git a/command/agentproxyshared/auth/auth_test.go b/command/agent/auth/auth_test.go similarity index 95% rename from command/agentproxyshared/auth/auth_test.go rename to command/agent/auth/auth_test.go index 5729435020fba..9501342749bb4 100644 --- a/command/agentproxyshared/auth/auth_test.go +++ b/command/agent/auth/auth_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package auth import ( @@ -112,7 +109,7 @@ consumption: func TestAgentBackoff(t *testing.T) { max := 1024 * time.Second - backoff := newAutoAuthBackoff(defaultMinBackoff, max, false) + backoff := newAgentBackoff(defaultMinBackoff, max, false) // Test initial value if backoff.current != defaultMinBackoff { @@ -162,7 +159,7 @@ func TestAgentMinBackoffCustom(t *testing.T) { for _, test := range tests { max := 1024 * time.Second - backoff := newAutoAuthBackoff(test.minBackoff, max, false) + backoff := newAgentBackoff(test.minBackoff, max, false) // Test initial value if backoff.current != test.want { diff --git a/command/agentproxyshared/auth/aws/aws.go b/command/agent/auth/aws/aws.go similarity index 98% rename from command/agentproxyshared/auth/aws/aws.go rename to command/agent/auth/aws/aws.go index 53d1623be8a4c..b9dbdd5499c82 100644 --- a/command/agentproxyshared/auth/aws/aws.go +++ b/command/agent/auth/aws/aws.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package aws import ( @@ -20,7 +17,7 @@ import ( "github.com/hashicorp/go-secure-stdlib/awsutil" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agent/auth" ) const ( diff --git a/command/agentproxyshared/auth/azure/azure.go b/command/agent/auth/azure/azure.go similarity index 96% rename from command/agentproxyshared/auth/azure/azure.go rename to command/agent/auth/azure/azure.go index 77e6613971b37..528e82ffe6cc2 100644 --- a/command/agentproxyshared/auth/azure/azure.go +++ b/command/agent/auth/azure/azure.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package azure import ( @@ -13,9 +10,9 @@ import ( cleanhttp "github.com/hashicorp/go-cleanhttp" hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/command/agent/auth" "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/useragent" ) const ( diff --git a/command/agent/auth/cert/cert.go b/command/agent/auth/cert/cert.go new file mode 100644 index 0000000000000..2703aa8ecd589 --- /dev/null +++ b/command/agent/auth/cert/cert.go @@ -0,0 +1,146 @@ +package cert + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/sdk/helper/consts" +) + +type certMethod struct { + logger hclog.Logger + mountPath string + name string + + caCert string + clientCert string + clientKey string + + // Client is the cached client to use if cert info was provided. 
+ client *api.Client +} + +var _ auth.AuthMethodWithClient = &certMethod{} + +func NewCertAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + + // Not concerned if the conf.Config is empty as the 'name' + // parameter is optional when using TLS Auth + + c := &certMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + } + + if conf.Config != nil { + nameRaw, ok := conf.Config["name"] + if !ok { + nameRaw = "" + } + c.name, ok = nameRaw.(string) + if !ok { + return nil, errors.New("could not convert 'name' config value to string") + } + + caCertRaw, ok := conf.Config["ca_cert"] + if ok { + c.caCert, ok = caCertRaw.(string) + if !ok { + return nil, errors.New("could not convert 'ca_cert' config value to string") + } + } + + clientCertRaw, ok := conf.Config["client_cert"] + if ok { + c.clientCert, ok = clientCertRaw.(string) + if !ok { + return nil, errors.New("could not convert 'cert_file' config value to string") + } + } + + clientKeyRaw, ok := conf.Config["client_key"] + if ok { + c.clientKey, ok = clientKeyRaw.(string) + if !ok { + return nil, errors.New("could not convert 'cert_key' config value to string") + } + } + } + + return c, nil +} + +func (c *certMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + c.logger.Trace("beginning authentication") + + authMap := map[string]interface{}{} + + if c.name != "" { + authMap["name"] = c.name + } + + return fmt.Sprintf("%s/login", c.mountPath), nil, authMap, nil +} + +func (c *certMethod) NewCreds() chan struct{} { + return nil +} + +func (c *certMethod) CredSuccess() {} + +func (c *certMethod) Shutdown() {} + +// AuthClient uses the existing client's address and returns a new client with +// the auto-auth method's certificate information if that's provided in its +// config map. 
+func (c *certMethod) AuthClient(client *api.Client) (*api.Client, error) { + c.logger.Trace("deriving auth client to use") + + clientToAuth := client + + if c.caCert != "" || (c.clientKey != "" && c.clientCert != "") { + // Return cached client if present + if c.client != nil { + return c.client, nil + } + + config := api.DefaultConfig() + if config.Error != nil { + return nil, config.Error + } + config.Address = client.Address() + + t := &api.TLSConfig{ + CACert: c.caCert, + ClientCert: c.clientCert, + ClientKey: c.clientKey, + } + + // Setup TLS config + if err := config.ConfigureTLS(t); err != nil { + return nil, err + } + + var err error + clientToAuth, err = api.NewClient(config) + if err != nil { + return nil, err + } + if ns := client.Headers().Get(consts.NamespaceHeaderName); ns != "" { + clientToAuth.SetNamespace(ns) + } + + // Cache the client for future use + c.client = clientToAuth + } + + return clientToAuth, nil +} diff --git a/command/agent/auth/cert/cert_test.go b/command/agent/auth/cert/cert_test.go new file mode 100644 index 0000000000000..15ff8f4327f37 --- /dev/null +++ b/command/agent/auth/cert/cert_test.go @@ -0,0 +1,132 @@ +package cert + +import ( + "context" + "os" + "path" + "reflect" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agent/auth" +) + +func TestCertAuthMethod_Authenticate(t *testing.T) { + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "foo", + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + loginPath, _, authMap, err := method.Authenticate(context.Background(), client) + if err != nil { + t.Fatal(err) + } + + expectedLoginPath := path.Join(config.MountPath, "/login") + if loginPath != expectedLoginPath { + t.Fatalf("mismatch on login path: got: %s, 
expected: %s", loginPath, expectedLoginPath) + } + + expectedAuthMap := map[string]interface{}{ + "name": config.Config["name"], + } + if !reflect.DeepEqual(authMap, expectedAuthMap) { + t.Fatalf("mismatch on login path:\ngot:\n\t%v\nexpected:\n\t%v", authMap, expectedAuthMap) + } +} + +func TestCertAuthMethod_AuthClient_withoutCerts(t *testing.T) { + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "without-certs", + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client != clientToUse { + t.Fatal("error: expected AuthClient to return back original client") + } +} + +func TestCertAuthMethod_AuthClient_withCerts(t *testing.T) { + clientCert, err := os.Open("./test-fixtures/keys/cert.pem") + if err != nil { + t.Fatal(err) + } + defer clientCert.Close() + + clientKey, err := os.Open("./test-fixtures/keys/key.pem") + if err != nil { + t.Fatal(err) + } + defer clientKey.Close() + + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "with-certs", + "client_cert": clientCert.Name(), + "client_key": clientKey.Name(), + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client == clientToUse { + t.Fatal("expected client from AuthClient to be different from original client") + } + + // Call AuthClient again to get back the cached client + cachedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + 
t.Fatal(err) + } + + if cachedClient != clientToUse { + t.Fatal("expected client from AuthClient to return back a cached client") + } +} diff --git a/command/agentproxyshared/auth/cert/test-fixtures/keys/cert.pem b/command/agent/auth/cert/test-fixtures/keys/cert.pem similarity index 100% rename from command/agentproxyshared/auth/cert/test-fixtures/keys/cert.pem rename to command/agent/auth/cert/test-fixtures/keys/cert.pem diff --git a/command/agentproxyshared/auth/cert/test-fixtures/keys/key.pem b/command/agent/auth/cert/test-fixtures/keys/key.pem similarity index 100% rename from command/agentproxyshared/auth/cert/test-fixtures/keys/key.pem rename to command/agent/auth/cert/test-fixtures/keys/key.pem diff --git a/command/agentproxyshared/auth/cert/test-fixtures/keys/pkioutput b/command/agent/auth/cert/test-fixtures/keys/pkioutput similarity index 100% rename from command/agentproxyshared/auth/cert/test-fixtures/keys/pkioutput rename to command/agent/auth/cert/test-fixtures/keys/pkioutput diff --git a/command/agentproxyshared/auth/cert/test-fixtures/root/pkioutput b/command/agent/auth/cert/test-fixtures/root/pkioutput similarity index 100% rename from command/agentproxyshared/auth/cert/test-fixtures/root/pkioutput rename to command/agent/auth/cert/test-fixtures/root/pkioutput diff --git a/command/agentproxyshared/auth/cert/test-fixtures/root/root.crl b/command/agent/auth/cert/test-fixtures/root/root.crl similarity index 100% rename from command/agentproxyshared/auth/cert/test-fixtures/root/root.crl rename to command/agent/auth/cert/test-fixtures/root/root.crl diff --git a/command/agentproxyshared/auth/cert/test-fixtures/root/rootcacert.pem b/command/agent/auth/cert/test-fixtures/root/rootcacert.pem similarity index 100% rename from command/agentproxyshared/auth/cert/test-fixtures/root/rootcacert.pem rename to command/agent/auth/cert/test-fixtures/root/rootcacert.pem diff --git a/command/agentproxyshared/auth/cert/test-fixtures/root/rootcakey.pem 
b/command/agent/auth/cert/test-fixtures/root/rootcakey.pem similarity index 100% rename from command/agentproxyshared/auth/cert/test-fixtures/root/rootcakey.pem rename to command/agent/auth/cert/test-fixtures/root/rootcakey.pem diff --git a/command/agentproxyshared/auth/cf/cf.go b/command/agent/auth/cf/cf.go similarity index 94% rename from command/agentproxyshared/auth/cf/cf.go rename to command/agent/auth/cf/cf.go index 3ee2077fb7132..9508b7164f2be 100644 --- a/command/agentproxyshared/auth/cf/cf.go +++ b/command/agent/auth/cf/cf.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cf import ( @@ -15,7 +12,7 @@ import ( cf "github.com/hashicorp/vault-plugin-auth-cf" "github.com/hashicorp/vault-plugin-auth-cf/signatures" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agent/auth" ) type cfMethod struct { diff --git a/command/agentproxyshared/auth/gcp/gcp.go b/command/agent/auth/gcp/gcp.go similarity index 97% rename from command/agentproxyshared/auth/gcp/gcp.go rename to command/agent/auth/gcp/gcp.go index bb7c6bab6d57f..45d9b74f9497b 100644 --- a/command/agentproxyshared/auth/gcp/gcp.go +++ b/command/agent/auth/gcp/gcp.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package gcp import ( @@ -17,7 +14,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agent/auth" "golang.org/x/oauth2" "google.golang.org/api/iamcredentials/v1" ) diff --git a/command/agent/auth/jwt/jwt.go b/command/agent/auth/jwt/jwt.go new file mode 100644 index 0000000000000..8f088eb199e5e --- /dev/null +++ b/command/agent/auth/jwt/jwt.go @@ -0,0 +1,214 @@ +package jwt + +import ( + "context" + "errors" + "fmt" + "io/fs" + "net/http" + "os" + "sync" + "sync/atomic" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/sdk/helper/parseutil" +) + +type jwtMethod struct { + logger hclog.Logger + path string + mountPath string + role string + removeJWTAfterReading bool + credsFound chan struct{} + watchCh chan string + stopCh chan struct{} + doneCh chan struct{} + credSuccessGate chan struct{} + ticker *time.Ticker + once *sync.Once + latestToken *atomic.Value +} + +// NewJWTAuthMethod returns an implementation of Agent's auth.AuthMethod +// interface for JWT auth. 
+func NewJWTAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + j := &jwtMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + removeJWTAfterReading: true, + credsFound: make(chan struct{}), + watchCh: make(chan string), + stopCh: make(chan struct{}), + doneCh: make(chan struct{}), + credSuccessGate: make(chan struct{}), + once: new(sync.Once), + latestToken: new(atomic.Value), + } + j.latestToken.Store("") + + pathRaw, ok := conf.Config["path"] + if !ok { + return nil, errors.New("missing 'path' value") + } + j.path, ok = pathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'path' config value to string") + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + j.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + if removeJWTAfterReadingRaw, ok := conf.Config["remove_jwt_after_reading"]; ok { + removeJWTAfterReading, err := parseutil.ParseBool(removeJWTAfterReadingRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'remove_jwt_after_reading' value: %w", err) + } + j.removeJWTAfterReading = removeJWTAfterReading + } + + switch { + case j.path == "": + return nil, errors.New("'path' value is empty") + case j.role == "": + return nil, errors.New("'role' value is empty") + } + + // If we don't delete the JWT after reading, use a slower reload period, + // otherwise we would re-read the whole file every 500ms, instead of just + // doing a stat on the file every 500ms. 
+ readPeriod := 1 * time.Minute + if j.removeJWTAfterReading { + readPeriod = 500 * time.Millisecond + } + j.ticker = time.NewTicker(readPeriod) + + go j.runWatcher() + + j.logger.Info("jwt auth method created", "path", j.path) + + return j, nil +} + +func (j *jwtMethod) Authenticate(_ context.Context, _ *api.Client) (string, http.Header, map[string]interface{}, error) { + j.logger.Trace("beginning authentication") + + j.ingressToken() + + latestToken := j.latestToken.Load().(string) + if latestToken == "" { + return "", nil, nil, errors.New("latest known jwt is empty, cannot authenticate") + } + + return fmt.Sprintf("%s/login", j.mountPath), nil, map[string]interface{}{ + "role": j.role, + "jwt": latestToken, + }, nil +} + +func (j *jwtMethod) NewCreds() chan struct{} { + return j.credsFound +} + +func (j *jwtMethod) CredSuccess() { + j.once.Do(func() { + close(j.credSuccessGate) + }) +} + +func (j *jwtMethod) Shutdown() { + j.ticker.Stop() + close(j.stopCh) + <-j.doneCh +} + +func (j *jwtMethod) runWatcher() { + defer close(j.doneCh) + + select { + case <-j.stopCh: + return + + case <-j.credSuccessGate: + // We only start the next loop once we're initially successful, + // since at startup Authenticate will be called and we don't want + // to end up immediately reauthenticating by having found a new + // value + } + + for { + select { + case <-j.stopCh: + return + + case <-j.ticker.C: + latestToken := j.latestToken.Load().(string) + j.ingressToken() + newToken := j.latestToken.Load().(string) + if newToken != latestToken { + j.logger.Debug("new jwt file found") + j.credsFound <- struct{}{} + } + } + } +} + +func (j *jwtMethod) ingressToken() { + fi, err := os.Lstat(j.path) + if err != nil { + if os.IsNotExist(err) { + return + } + j.logger.Error("error encountered stat'ing jwt file", "error", err) + return + } + + // Check that the path refers to a file. 
+ // If it's a symlink, it could still be a symlink to a directory, + // but os.ReadFile below will return a descriptive error. + switch mode := fi.Mode(); { + case mode.IsRegular(): + // regular file + case mode&fs.ModeSymlink != 0: + // symlink + default: + j.logger.Error("jwt file is not a regular file or symlink") + return + } + + token, err := os.ReadFile(j.path) + if err != nil { + j.logger.Error("failed to read jwt file", "error", err) + return + } + + switch len(token) { + case 0: + j.logger.Warn("empty jwt file read") + + default: + j.latestToken.Store(string(token)) + } + + if j.removeJWTAfterReading { + if err := os.Remove(j.path); err != nil { + j.logger.Error("error removing jwt file", "error", err) + } + } +} diff --git a/command/agent/auth/jwt/jwt_test.go b/command/agent/auth/jwt/jwt_test.go new file mode 100644 index 0000000000000..8e9a2ae86c136 --- /dev/null +++ b/command/agent/auth/jwt/jwt_test.go @@ -0,0 +1,167 @@ +package jwt + +import ( + "bytes" + "os" + "path" + "strings" + "sync/atomic" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agent/auth" +) + +func TestIngressToken(t *testing.T) { + const ( + dir = "dir" + file = "file" + empty = "empty" + missing = "missing" + symlinked = "symlinked" + ) + + rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + + setupTestDir := func() string { + testDir, err := os.MkdirTemp(rootDir, "") + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(path.Join(testDir, file), []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + _, err = os.Create(path.Join(testDir, empty)) + if err != nil { + t.Fatal(err) + } + err = os.Mkdir(path.Join(testDir, dir), 0o755) + if err != nil { + t.Fatal(err) + } + err = os.Symlink(path.Join(testDir, file), path.Join(testDir, symlinked)) + if err != nil { + t.Fatal(err) + } + + return testDir + } + + for _, tc := range 
[]struct { + name string + path string + errString string + }{ + { + "happy path", + file, + "", + }, + { + "path is directory", + dir, + "[ERROR] jwt file is not a regular file or symlink", + }, + { + "path is symlink", + symlinked, + "", + }, + { + "path is missing (implies nothing for ingressToken to do)", + missing, + "", + }, + { + "path is empty file", + empty, + "[WARN] empty jwt file read", + }, + } { + testDir := setupTestDir() + logBuffer := bytes.Buffer{} + jwtAuth := &jwtMethod{ + logger: hclog.New(&hclog.LoggerOptions{ + Output: &logBuffer, + }), + latestToken: new(atomic.Value), + path: path.Join(testDir, tc.path), + } + + jwtAuth.ingressToken() + + if tc.errString != "" { + if !strings.Contains(logBuffer.String(), tc.errString) { + t.Fatal("logs did no contain expected error", tc.errString, logBuffer.String()) + } + } else { + if strings.Contains(logBuffer.String(), "[ERROR]") || strings.Contains(logBuffer.String(), "[WARN]") { + t.Fatal("logs contained unexpected error", logBuffer.String()) + } + } + } +} + +func TestDeleteAfterReading(t *testing.T) { + for _, tc := range map[string]struct { + configValue string + shouldDelete bool + }{ + "default": { + "", + true, + }, + "explicit true": { + "true", + true, + }, + "false": { + "false", + false, + }, + } { + rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + tokenPath := path.Join(rootDir, "token") + err = os.WriteFile(tokenPath, []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + + config := &auth.AuthConfig{ + Config: map[string]interface{}{ + "path": tokenPath, + "role": "unusedrole", + }, + Logger: hclog.Default(), + } + if tc.configValue != "" { + config.Config["remove_jwt_after_reading"] = tc.configValue + } + + jwtAuth, err := NewJWTAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + jwtAuth.(*jwtMethod).ingressToken() + + if _, err := os.Lstat(tokenPath); 
tc.shouldDelete { + if err == nil || !os.IsNotExist(err) { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} diff --git a/command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh b/command/agent/auth/kerberos/integtest/integrationtest.sh similarity index 98% rename from command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh rename to command/agent/auth/kerberos/integtest/integrationtest.sh index b3d9edf65db9e..28da55f599cf6 100755 --- a/command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh +++ b/command/agent/auth/kerberos/integtest/integrationtest.sh @@ -1,7 +1,4 @@ #!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # Instructions # This integration test is for the Vault Kerberos agent. # Before running, execute: diff --git a/command/agentproxyshared/auth/kerberos/kerberos.go b/command/agent/auth/kerberos/kerberos.go similarity index 95% rename from command/agentproxyshared/auth/kerberos/kerberos.go rename to command/agent/auth/kerberos/kerberos.go index 67a3109f5f5ec..894c177d5c8a9 100644 --- a/command/agentproxyshared/auth/kerberos/kerberos.go +++ b/command/agent/auth/kerberos/kerberos.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package kerberos import ( @@ -13,7 +10,7 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" kerberos "github.com/hashicorp/vault-plugin-auth-kerberos" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agent/auth" "github.com/jcmturner/gokrb5/v8/spnego" ) diff --git a/command/agentproxyshared/auth/kerberos/kerberos_test.go b/command/agent/auth/kerberos/kerberos_test.go similarity index 94% rename from command/agentproxyshared/auth/kerberos/kerberos_test.go rename to command/agent/auth/kerberos/kerberos_test.go index 070893d758767..4cfe3479ed4ce 100644 --- a/command/agentproxyshared/auth/kerberos/kerberos_test.go +++ b/command/agent/auth/kerberos/kerberos_test.go @@ -1,13 +1,10 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package kerberos import ( "testing" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agent/auth" ) func TestNewKerberosAuthMethod(t *testing.T) { diff --git a/command/agentproxyshared/auth/kubernetes/kubernetes.go b/command/agent/auth/kubernetes/kubernetes.go similarity index 95% rename from command/agentproxyshared/auth/kubernetes/kubernetes.go rename to command/agent/auth/kubernetes/kubernetes.go index acbb8c044cb58..c30f3cb5a68b1 100644 --- a/command/agentproxyshared/auth/kubernetes/kubernetes.go +++ b/command/agent/auth/kubernetes/kubernetes.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package kubernetes import ( @@ -15,7 +12,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agent/auth" ) const ( diff --git a/command/agentproxyshared/auth/kubernetes/kubernetes_test.go b/command/agent/auth/kubernetes/kubernetes_test.go similarity index 97% rename from command/agentproxyshared/auth/kubernetes/kubernetes_test.go rename to command/agent/auth/kubernetes/kubernetes_test.go index cbf617029c6ab..34f965c770960 100644 --- a/command/agentproxyshared/auth/kubernetes/kubernetes_test.go +++ b/command/agent/auth/kubernetes/kubernetes_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package kubernetes import ( @@ -12,7 +9,7 @@ import ( "github.com/hashicorp/errwrap" hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agent/auth" "github.com/hashicorp/vault/sdk/helper/logging" ) diff --git a/command/agent/auto_auth_preload_token_end_to_end_test.go b/command/agent/auto_auth_preload_token_end_to_end_test.go index 004e817dac129..3f8d972a32cf7 100644 --- a/command/agent/auto_auth_preload_token_end_to_end_test.go +++ b/command/agent/auto_auth_preload_token_end_to_end_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package agent import ( @@ -13,10 +10,10 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - agentAppRole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agent/auth" + agentAppRole "github.com/hashicorp/vault/command/agent/auth/approle" + "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" diff --git a/command/agent/aws_end_to_end_test.go b/command/agent/aws_end_to_end_test.go index 08644bdc1d2e5..ca7b419648c75 100644 --- a/command/agent/aws_end_to_end_test.go +++ b/command/agent/aws_end_to_end_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package agent import ( @@ -18,11 +15,10 @@ import ( uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" vaultaws "github.com/hashicorp/vault/builtin/credential/aws" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - agentaws "github.com/hashicorp/vault/command/agentproxyshared/auth/aws" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" - "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/command/agent/auth" + agentaws "github.com/hashicorp/vault/command/agent/auth/aws" + "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -51,14 +47,6 @@ func TestAWSEndToEnd(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. - credNames := []string{ - envVarAwsTestAccessKey, - envVarAwsTestSecretKey, - envVarAwsTestRoleArn, - } - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ Logger: logger, diff --git a/command/agent/cache/api_proxy.go b/command/agent/cache/api_proxy.go new file mode 100644 index 0000000000000..1a754e064ec16 --- /dev/null +++ b/command/agent/cache/api_proxy.go @@ -0,0 +1,146 @@ +package cache + +import ( + "context" + "fmt" + "sync" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/http" +) + +type EnforceConsistency int + +const ( + EnforceConsistencyNever EnforceConsistency = iota + EnforceConsistencyAlways +) + +type WhenInconsistentAction int + +const ( + WhenInconsistentFail WhenInconsistentAction = iota + WhenInconsistentRetry + WhenInconsistentForward +) + +// APIProxy is an implementation of the proxier interface 
that is used to +// forward the request to Vault and get the response. +type APIProxy struct { + client *api.Client + logger hclog.Logger + enforceConsistency EnforceConsistency + whenInconsistentAction WhenInconsistentAction + l sync.RWMutex + lastIndexStates []string +} + +var _ Proxier = &APIProxy{} + +type APIProxyConfig struct { + Client *api.Client + Logger hclog.Logger + EnforceConsistency EnforceConsistency + WhenInconsistentAction WhenInconsistentAction +} + +func NewAPIProxy(config *APIProxyConfig) (Proxier, error) { + if config.Client == nil { + return nil, fmt.Errorf("nil API client") + } + return &APIProxy{ + client: config.Client, + logger: config.Logger, + enforceConsistency: config.EnforceConsistency, + whenInconsistentAction: config.WhenInconsistentAction, + }, nil +} + +func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + client, err := ap.client.Clone() + if err != nil { + return nil, err + } + client.SetToken(req.Token) + + // Derive and set a logger for the client + clientLogger := ap.logger.Named("client") + client.SetLogger(clientLogger) + + // http.Transport will transparently request gzip and decompress the response, but only if + // the client doesn't manually set the header. Removing any Accept-Encoding header allows the + // transparent compression to occur. 
+ req.Request.Header.Del("Accept-Encoding") + client.SetHeaders(req.Request.Header) + + fwReq := client.NewRequest(req.Request.Method, req.Request.URL.Path) + fwReq.BodyBytes = req.RequestBody + + query := req.Request.URL.Query() + if len(query) != 0 { + fwReq.Params = query + } + + var newState string + manageState := ap.enforceConsistency == EnforceConsistencyAlways && + req.Request.Header.Get(http.VaultIndexHeaderName) == "" && + req.Request.Header.Get(http.VaultForwardHeaderName) == "" && + req.Request.Header.Get(http.VaultInconsistentHeaderName) == "" + + if manageState { + client = client.WithResponseCallbacks(api.RecordState(&newState)) + ap.l.RLock() + lastStates := ap.lastIndexStates + ap.l.RUnlock() + if len(lastStates) != 0 { + client = client.WithRequestCallbacks(api.RequireState(lastStates...)) + switch ap.whenInconsistentAction { + case WhenInconsistentFail: + // In this mode we want to delegate handling of inconsistency + // failures to the external client talking to Agent. + client.SetCheckRetry(retryablehttp.DefaultRetryPolicy) + case WhenInconsistentRetry: + // In this mode we want to handle retries due to inconsistency + // internally. This is the default api.Client behaviour so + // we needn't do anything. + case WhenInconsistentForward: + fwReq.Headers.Set(http.VaultInconsistentHeaderName, http.VaultInconsistentForward) + } + } + } + + // Make the request to Vault and get the response + ap.logger.Info("forwarding request to Vault", "method", req.Request.Method, "path", req.Request.URL.Path) + + resp, err := client.RawRequestWithContext(ctx, fwReq) + if resp == nil && err != nil { + // We don't want to cache nil responses, so we simply return the error + return nil, err + } + + if newState != "" { + ap.l.Lock() + // We want to be using the "newest" states seen, but newer isn't well + // defined here. There can be two states S1 and S2 which aren't strictly ordered: + // S1 could have a newer localindex and S2 could have a newer replicatedindex. 
So + // we need to merge them. But we can't merge them because we wouldn't be able to + // "sign" the resulting header because we don't have access to the HMAC key that + // Vault uses to do so. So instead we compare any of the 0-2 saved states + // we have to the new header, keeping the newest 1-2 of these, and sending + // them to Vault to evaluate. + ap.lastIndexStates = api.MergeReplicationStates(ap.lastIndexStates, newState) + ap.l.Unlock() + } + + // Before error checking from the request call, we'd want to initialize a SendResponse to + // potentially return + sendResponse, newErr := NewSendResponse(resp, nil) + if newErr != nil { + return nil, newErr + } + + // Bubble back the api.Response as well for error checking/handling at the handler layer. + return sendResponse, err +} diff --git a/command/agent/cache/api_proxy_test.go b/command/agent/cache/api_proxy_test.go new file mode 100644 index 0000000000000..b90f579c3f000 --- /dev/null +++ b/command/agent/cache/api_proxy_test.go @@ -0,0 +1,95 @@ +package cache + +import ( + "net/http" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +func TestAPIProxy(t *testing.T) { + cleanup, client, _, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + proxier, err := NewAPIProxy(&APIProxyConfig{ + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + }) + if err != nil { + t.Fatal(err) + } + + r := client.NewRequest("GET", "/v1/sys/health") + req, err := r.ToHTTP() + if err != nil { + t.Fatal(err) + } + + resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{ + Request: req, + }) + if err != nil { + t.Fatal(err) + } + + var result api.HealthResponse + err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result) + if err != nil { + t.Fatal(err) + } + + if !result.Initialized || 
result.Sealed || result.Standby { + t.Fatalf("bad sys/health response: %#v", result) + } +} + +func TestAPIProxy_queryParams(t *testing.T) { + // Set up an agent that points to a standby node for this particular test + // since it needs to proxy a /sys/health?standbyok=true request to a standby + cleanup, client, _, _ := setupClusterAndAgentOnStandby(namespace.RootContext(nil), t, nil) + defer cleanup() + + proxier, err := NewAPIProxy(&APIProxyConfig{ + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + }) + if err != nil { + t.Fatal(err) + } + + r := client.NewRequest("GET", "/v1/sys/health") + req, err := r.ToHTTP() + if err != nil { + t.Fatal(err) + } + + // Add a query parameter for testing + q := req.URL.Query() + q.Add("standbyok", "true") + req.URL.RawQuery = q.Encode() + + resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{ + Request: req, + }) + if err != nil { + t.Fatal(err) + } + + var result api.HealthResponse + err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result) + if err != nil { + t.Fatal(err) + } + + if !result.Initialized || result.Sealed || !result.Standby { + t.Fatalf("bad sys/health response: %#v", result) + } + + if resp.Response.StatusCode != http.StatusOK { + t.Fatalf("exptected standby to return 200, got: %v", resp.Response.StatusCode) + } +} diff --git a/command/agentproxyshared/cache/cache_test.go b/command/agent/cache/cache_test.go similarity index 84% rename from command/agentproxyshared/cache/cache_test.go rename to command/agent/cache/cache_test.go index 4786950bd5a01..8b2e1caa7155f 100644 --- a/command/agentproxyshared/cache/cache_test.go +++ b/command/agent/cache/cache_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cache import ( @@ -11,6 +8,7 @@ import ( "math/rand" "net" "net/http" + "os" "sync" "testing" "time" @@ -19,8 +17,9 @@ import ( "github.com/hashicorp/go-hclog" kv "github.com/hashicorp/vault-plugin-secrets-kv" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" - "github.com/hashicorp/vault/command/agentproxyshared/sink/mock" + "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/command/agent/cache/cachememdb" + "github.com/hashicorp/vault/command/agent/sink/mock" "github.com/hashicorp/vault/helper/namespace" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" @@ -29,6 +28,174 @@ import ( "github.com/hashicorp/vault/vault" ) +const policyAdmin = ` +path "*" { + capabilities = ["sudo", "create", "read", "update", "delete", "list"] +} +` + +// setupClusterAndAgent is a helper func used to set up a test cluster and +// caching agent against the active node. It returns a cleanup func that should +// be deferred immediately along with two clients, one for direct cluster +// communication and another to talk to the caching agent. +func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) { + return setupClusterAndAgentCommon(ctx, t, coreConfig, false) +} + +// setupClusterAndAgentOnStandby is a helper func used to set up a test cluster +// and caching agent against a standby node. It returns a cleanup func that +// should be deferred immediately along with two clients, one for direct cluster +// communication and another to talk to the caching agent. 
+func setupClusterAndAgentOnStandby(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) { + return setupClusterAndAgentCommon(ctx, t, coreConfig, true) +} + +func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig, onStandby bool) (func(), *api.Client, *api.Client, *LeaseCache) { + t.Helper() + + if ctx == nil { + ctx = context.Background() + } + + // Handle sane defaults + if coreConfig == nil { + coreConfig = &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: logging.NewVaultLogger(hclog.Trace), + } + } + + // Always set up the userpass backend since we use that to generate an admin + // token for the client that will make proxied requests to through the agent. + if coreConfig.CredentialBackends == nil || coreConfig.CredentialBackends["userpass"] == nil { + coreConfig.CredentialBackends = map[string]logical.Factory{ + "userpass": userpass.Factory, + } + } + + // Init new test cluster + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + + activeClient := cores[0].Client + standbyClient := cores[1].Client + + // clienToUse is the client for the agent to point to. + clienToUse := activeClient + if onStandby { + clienToUse = standbyClient + } + + // Add an admin policy + if err := activeClient.Sys().PutPolicy("admin", policyAdmin); err != nil { + t.Fatal(err) + } + + // Set up the userpass auth backend and an admin user. Used for getting a token + // for the agent later down in this func. 
+ err := activeClient.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ + Type: "userpass", + }) + if err != nil { + t.Fatal(err) + } + + _, err = activeClient.Logical().Write("auth/userpass/users/foo", map[string]interface{}{ + "password": "bar", + "policies": []string{"admin"}, + }) + if err != nil { + t.Fatal(err) + } + + // Set up env vars for agent consumption + origEnvVaultAddress := os.Getenv(api.EnvVaultAddress) + os.Setenv(api.EnvVaultAddress, clienToUse.Address()) + + origEnvVaultCACert := os.Getenv(api.EnvVaultCACert) + os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir)) + + cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") + + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + // Create the API proxier + apiProxy, err := NewAPIProxy(&APIProxyConfig{ + Client: clienToUse, + Logger: cacheLogger.Named("apiproxy"), + }) + if err != nil { + t.Fatal(err) + } + + // Create the lease cache proxier and set its underlying proxier to + // the API proxier. + leaseCache, err := NewLeaseCache(&LeaseCacheConfig{ + Client: clienToUse, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + }) + if err != nil { + t.Fatal(err) + } + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + mux.Handle("/agent/v1/cache-clear", leaseCache.HandleCacheClear(ctx)) + + mux.Handle("/", Handler(ctx, cacheLogger, leaseCache, nil, true)) + server := &http.Server{ + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: cacheLogger.StandardLogger(nil), + } + go server.Serve(listener) + + // testClient is the client that is used to talk to the agent for proxying/caching behavior. 
+ testClient, err := activeClient.Clone() + if err != nil { + t.Fatal(err) + } + + if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { + t.Fatal(err) + } + + // Login via userpass method to derive a managed token. Set that token as the + // testClient's token + resp, err := testClient.Logical().Write("auth/userpass/login/foo", map[string]interface{}{ + "password": "bar", + }) + if err != nil { + t.Fatal(err) + } + testClient.SetToken(resp.Auth.ClientToken) + + cleanup := func() { + // We wait for a tiny bit for things such as agent renewal to exit properly + time.Sleep(50 * time.Millisecond) + + cluster.Cleanup() + os.Setenv(api.EnvVaultAddress, origEnvVaultAddress) + os.Setenv(api.EnvVaultCACert, origEnvVaultCACert) + listener.Close() + } + + return cleanup, clienToUse, testClient, leaseCache +} + func tokenRevocationValidation(t *testing.T, sampleSpace map[string]string, expected map[string]string, leaseCache *LeaseCache) { t.Helper() for val, valType := range sampleSpace { @@ -81,7 +248,7 @@ func TestCache_AutoAuthTokenStripping(t *testing.T) { mux := http.NewServeMux() mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true)) + mux.Handle("/", Handler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true)) server := &http.Server{ Handler: mux, ReadHeaderTimeout: 10 * time.Second, @@ -170,7 +337,7 @@ func TestCache_AutoAuthClientTokenProxyStripping(t *testing.T) { mux := http.NewServeMux() // mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false)) + mux.Handle("/", Handler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false)) server := &http.Server{ Handler: mux, ReadHeaderTimeout: 10 * time.Second, diff --git a/command/agentproxyshared/cache/cacheboltdb/bolt.go b/command/agent/cache/cacheboltdb/bolt.go 
similarity index 99% rename from command/agentproxyshared/cache/cacheboltdb/bolt.go rename to command/agent/cache/cacheboltdb/bolt.go index 434b4116542a4..72cb7f3b82464 100644 --- a/command/agentproxyshared/cache/cacheboltdb/bolt.go +++ b/command/agent/cache/cacheboltdb/bolt.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cacheboltdb import ( diff --git a/command/agentproxyshared/cache/cacheboltdb/bolt_test.go b/command/agent/cache/cacheboltdb/bolt_test.go similarity index 98% rename from command/agentproxyshared/cache/cacheboltdb/bolt_test.go rename to command/agent/cache/cacheboltdb/bolt_test.go index 95aacd27cef03..d6f5a742ef34a 100644 --- a/command/agentproxyshared/cache/cacheboltdb/bolt_test.go +++ b/command/agent/cache/cacheboltdb/bolt_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cacheboltdb import ( @@ -16,7 +13,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" + "github.com/hashicorp/vault/command/agent/cache/keymanager" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" diff --git a/command/agentproxyshared/cache/cachememdb/cache_memdb.go b/command/agent/cache/cachememdb/cache_memdb.go similarity index 98% rename from command/agentproxyshared/cache/cachememdb/cache_memdb.go rename to command/agent/cache/cachememdb/cache_memdb.go index 93aa2bf78faf8..7fdad303bb569 100644 --- a/command/agentproxyshared/cache/cachememdb/cache_memdb.go +++ b/command/agent/cache/cachememdb/cache_memdb.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cachememdb import ( diff --git a/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go b/command/agent/cache/cachememdb/cache_memdb_test.go similarity index 99% rename from command/agentproxyshared/cache/cachememdb/cache_memdb_test.go rename to command/agent/cache/cachememdb/cache_memdb_test.go index 87b8eee798b2f..4162fed0daf7b 100644 --- a/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go +++ b/command/agent/cache/cachememdb/cache_memdb_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cachememdb import ( diff --git a/command/agentproxyshared/cache/cachememdb/index.go b/command/agent/cache/cachememdb/index.go similarity index 98% rename from command/agentproxyshared/cache/cachememdb/index.go rename to command/agent/cache/cachememdb/index.go index a7da2edc2514c..546a528cb2e40 100644 --- a/command/agentproxyshared/cache/cachememdb/index.go +++ b/command/agent/cache/cachememdb/index.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cachememdb import ( diff --git a/command/agentproxyshared/cache/cachememdb/index_test.go b/command/agent/cache/cachememdb/index_test.go similarity index 94% rename from command/agentproxyshared/cache/cachememdb/index_test.go rename to command/agent/cache/cachememdb/index_test.go index c59ec5cba3344..577e37d647cdc 100644 --- a/command/agentproxyshared/cache/cachememdb/index_test.go +++ b/command/agent/cache/cachememdb/index_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cachememdb import ( diff --git a/command/agentproxyshared/cache/handler.go b/command/agent/cache/handler.go similarity index 94% rename from command/agentproxyshared/cache/handler.go rename to command/agent/cache/handler.go index bfb4434dc22f1..60f9e046a0fa1 100644 --- a/command/agentproxyshared/cache/handler.go +++ b/command/agent/cache/handler.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cache import ( @@ -18,12 +15,12 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agent/sink" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" ) -func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink, proxyVaultToken bool) http.Handler { +func Handler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink, proxyVaultToken bool) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { logger.Info("received request", "method", r.Method, "path", r.URL.Path) @@ -39,7 +36,7 @@ func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inm } // Parse and reset body. 
- reqBody, err := io.ReadAll(r.Body) + reqBody, err := ioutil.ReadAll(r.Body) if err != nil { logger.Error("failed to read request body") logical.RespondError(w, http.StatusInternalServerError, errors.New("failed to read request body")) @@ -48,7 +45,7 @@ func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inm if r.Body != nil { r.Body.Close() } - r.Body = io.NopCloser(bytes.NewReader(reqBody)) + r.Body = ioutil.NopCloser(bytes.NewReader(reqBody)) req := &SendRequest{ Token: token, Request: r, diff --git a/command/agentproxyshared/cache/keymanager/manager.go b/command/agent/cache/keymanager/manager.go similarity index 88% rename from command/agentproxyshared/cache/keymanager/manager.go rename to command/agent/cache/keymanager/manager.go index 0cecc03a11f7d..ff4d0f2c00fa8 100644 --- a/command/agentproxyshared/cache/keymanager/manager.go +++ b/command/agent/cache/keymanager/manager.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package keymanager import ( diff --git a/command/agentproxyshared/cache/keymanager/passthrough.go b/command/agent/cache/keymanager/passthrough.go similarity index 96% rename from command/agentproxyshared/cache/keymanager/passthrough.go rename to command/agent/cache/keymanager/passthrough.go index cda6b6e5db349..68a1fc221b623 100644 --- a/command/agentproxyshared/cache/keymanager/passthrough.go +++ b/command/agent/cache/keymanager/passthrough.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package keymanager import ( diff --git a/command/agentproxyshared/cache/keymanager/passthrough_test.go b/command/agent/cache/keymanager/passthrough_test.go similarity index 93% rename from command/agentproxyshared/cache/keymanager/passthrough_test.go rename to command/agent/cache/keymanager/passthrough_test.go index 9327ee3f0ec63..084a71a143f26 100644 --- a/command/agentproxyshared/cache/keymanager/passthrough_test.go +++ b/command/agent/cache/keymanager/passthrough_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package keymanager import ( diff --git a/command/agentproxyshared/cache/lease_cache.go b/command/agent/cache/lease_cache.go similarity index 97% rename from command/agentproxyshared/cache/lease_cache.go rename to command/agent/cache/lease_cache.go index 3bcb58002a6d6..9bc79d4a2713b 100644 --- a/command/agentproxyshared/cache/lease_cache.go +++ b/command/agent/cache/lease_cache.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cache import ( @@ -23,11 +20,10 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/base62" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agent/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agent/cache/cachememdb" "github.com/hashicorp/vault/helper/namespace" nshelper "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/helper/useragent" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/cryptoutil" @@ -174,12 +170,6 @@ func (c *LeaseCache) SetPersistentStorage(storageIn *cacheboltdb.BoltStorage) { c.ps = storageIn } -// PersistentStorage is a getter for the persistent storage field in -// LeaseCache -func (c *LeaseCache) PersistentStorage() *cacheboltdb.BoltStorage { - return c.ps -} - // checkCacheForRequest checks the cache for a particular request based on its // computed ID. It returns a non-nil *SendResponse if an entry is found. func (c *LeaseCache) checkCacheForRequest(id string) (*SendResponse, error) { @@ -484,18 +474,7 @@ func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, return } client.SetToken(req.Token) - - headers := client.Headers() - if headers == nil { - headers = make(http.Header) - } - - // We do not preserve the initial User-Agent here (i.e. use - // AgentProxyStringWithProxiedUserAgent) since these requests are from - // the proxy subsystem, but are made by Agent's lifetime watcher, - // not triggered by a specific request. 
- headers.Set("User-Agent", useragent.AgentProxyString()) - client.SetHeaders(headers) + client.SetHeaders(req.Request.Header) watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{ Secret: secret, @@ -589,11 +568,6 @@ func computeIndexID(req *SendRequest) (string, error) { // HandleCacheClear returns a handlerFunc that can perform cache clearing operations. func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // If the cache is not enabled, return a 200 - if c == nil { - return - } - // Only handle POST/PUT requests switch r.Method { case http.MethodPost: diff --git a/command/agentproxyshared/cache/lease_cache_test.go b/command/agent/cache/lease_cache_test.go similarity index 97% rename from command/agentproxyshared/cache/lease_cache_test.go rename to command/agent/cache/lease_cache_test.go index 2de4c56b09bb5..1501fcfe56db0 100644 --- a/command/agentproxyshared/cache/lease_cache_test.go +++ b/command/agent/cache/lease_cache_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cache import ( @@ -21,10 +18,9 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" - "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" - "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/command/agent/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agent/cache/cachememdb" + "github.com/hashicorp/vault/command/agent/cache/keymanager" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" @@ -43,7 +39,7 @@ func testNewLeaseCache(t *testing.T, responses []*SendResponse) *LeaseCache { lc, err := NewLeaseCache(&LeaseCacheConfig{ Client: client, BaseContext: context.Background(), - Proxier: NewMockProxier(responses), + Proxier: newMockProxier(responses), Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), }) if err != nil { @@ -82,7 +78,7 @@ func testNewLeaseCacheWithPersistence(t *testing.T, responses []*SendResponse, s lc, err := NewLeaseCache(&LeaseCacheConfig{ Client: client, BaseContext: context.Background(), - Proxier: NewMockProxier(responses), + Proxier: newMockProxier(responses), Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), Storage: storage, }) @@ -738,7 +734,7 @@ func compareBeforeAndAfter(t *testing.T, before, after *LeaseCache, beforeLen, a assert.Equal(t, cachedItem.Lease, restoredItem.Lease) assert.Equal(t, cachedItem.LeaseToken, restoredItem.LeaseToken) assert.Equal(t, cachedItem.Namespace, restoredItem.Namespace) - assert.EqualValues(t, cachedItem.RequestHeader, restoredItem.RequestHeader) + assert.Equal(t, cachedItem.RequestHeader, restoredItem.RequestHeader) assert.Equal(t, cachedItem.RequestMethod, 
restoredItem.RequestMethod) assert.Equal(t, cachedItem.RequestPath, restoredItem.RequestPath) assert.Equal(t, cachedItem.RequestToken, restoredItem.RequestToken) @@ -843,21 +839,16 @@ func TestLeaseCache_PersistAndRestore(t *testing.T) { var deleteIDs []string for i, ct := range cacheTests { // Send once to cache - req := httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) - req.Header.Set("User-Agent", useragent.AgentProxyString()) - sendReq := &SendRequest{ Token: ct.token, - Request: req, + Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), } if ct.deleteFromPersistentStore { deleteID, err := computeIndexID(sendReq) require.NoError(t, err) deleteIDs = append(deleteIDs, deleteID) // Now reset the body after calculating the index - req = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) - req.Header.Set("User-Agent", useragent.AgentProxyString()) - sendReq.Request = req + sendReq.Request = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) } resp, err := lc.Send(context.Background(), sendReq) require.NoError(t, err) @@ -866,11 +857,9 @@ func TestLeaseCache_PersistAndRestore(t *testing.T) { // Send again to test cache. If this isn't cached, the response returned // will be the next in the list and the status code will not match. - req = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) - req.Header.Set("User-Agent", useragent.AgentProxyString()) sendCacheReq := &SendRequest{ Token: ct.token, - Request: req, + Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), } respCached, err := lc.Send(context.Background(), sendCacheReq) require.NoError(t, err, "failed to send request %+v", ct) @@ -902,11 +891,9 @@ func TestLeaseCache_PersistAndRestore(t *testing.T) { // And finally send the cache requests once to make sure they're all being // served from the restoredCache unless they were intended to be missing after restore. 
for i, ct := range cacheTests { - req := httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) - req.Header.Set("User-Agent", useragent.AgentProxyString()) sendCacheReq := &SendRequest{ Token: ct.token, - Request: req, + Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), } respCached, err := restoredCache.Send(context.Background(), sendCacheReq) require.NoError(t, err, "failed to send request %+v", ct) diff --git a/command/agent/cache/listener.go b/command/agent/cache/listener.go new file mode 100644 index 0000000000000..c11867ac13a53 --- /dev/null +++ b/command/agent/cache/listener.go @@ -0,0 +1,68 @@ +package cache + +import ( + "crypto/tls" + "fmt" + "net" + "strings" + + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" +) + +func StartListener(lnConfig *configutil.Listener) (net.Listener, *tls.Config, error) { + addr := lnConfig.Address + + var ln net.Listener + var err error + switch lnConfig.Type { + case "tcp": + if addr == "" { + addr = "127.0.0.1:8200" + } + + bindProto := "tcp" + // If they've passed 0.0.0.0, we only want to bind on IPv4 + // rather than golang's dual stack default + if strings.HasPrefix(addr, "0.0.0.0:") { + bindProto = "tcp4" + } + + ln, err = net.Listen(bindProto, addr) + if err != nil { + return nil, nil, err + } + ln = &server.TCPKeepAliveListener{ln.(*net.TCPListener)} + + case "unix": + var uConfig *listenerutil.UnixSocketsConfig + if lnConfig.SocketMode != "" && + lnConfig.SocketUser != "" && + lnConfig.SocketGroup != "" { + uConfig = &listenerutil.UnixSocketsConfig{ + Mode: lnConfig.SocketMode, + User: lnConfig.SocketUser, + Group: lnConfig.SocketGroup, + } + } + ln, err = listenerutil.UnixSocketListener(addr, uConfig) + if err != nil { + return nil, nil, err + } + + default: + return nil, nil, fmt.Errorf("invalid listener type: %q", lnConfig.Type) + } + + props := 
map[string]string{"addr": ln.Addr().String()} + tlsConf, _, err := listenerutil.TLSConfig(lnConfig, props, nil) + if err != nil { + return nil, nil, err + } + if tlsConf != nil { + ln = tls.NewListener(ln, tlsConf) + } + + return ln, tlsConf, nil +} diff --git a/command/agent/cache/proxy.go b/command/agent/cache/proxy.go new file mode 100644 index 0000000000000..ef7a83711f378 --- /dev/null +++ b/command/agent/cache/proxy.go @@ -0,0 +1,76 @@ +package cache + +import ( + "bytes" + "context" + "io/ioutil" + "net/http" + "time" + + "github.com/hashicorp/vault/api" +) + +// SendRequest is the input for Proxier.Send. +type SendRequest struct { + Token string + Request *http.Request + + // RequestBody is the stored body bytes from Request.Body. It is set here to + // avoid reading and re-setting the stream multiple times. + RequestBody []byte +} + +// SendResponse is the output from Proxier.Send. +type SendResponse struct { + Response *api.Response + + // ResponseBody is the stored body bytes from Response.Body. It is set here to + // avoid reading and re-setting the stream multiple times. + ResponseBody []byte + CacheMeta *CacheMeta +} + +// CacheMeta contains metadata information about the response, +// such as whether it was a cache hit or miss, and the age of the +// cached entry. +type CacheMeta struct { + Hit bool + Age time.Duration +} + +// Proxier is the interface implemented by different components that are +// responsible for performing specific tasks, such as caching and proxying. All +// these tasks combined together would serve the request received by the agent. +type Proxier interface { + Send(ctx context.Context, req *SendRequest) (*SendResponse, error) +} + +// NewSendResponse creates a new SendResponse and takes care of initializing its +// fields properly. 
+func NewSendResponse(apiResponse *api.Response, responseBody []byte) (*SendResponse, error) { + resp := &SendResponse{ + Response: apiResponse, + CacheMeta: &CacheMeta{}, + } + + // If a response body is separately provided we set that as the SendResponse.ResponseBody, + // otherwise we will do an ioutil.ReadAll to extract the response body from apiResponse. + switch { + case len(responseBody) > 0: + resp.ResponseBody = responseBody + case apiResponse.Body != nil: + respBody, err := ioutil.ReadAll(apiResponse.Body) + if err != nil { + return nil, err + } + // Close the old body + apiResponse.Body.Close() + + // Re-set the response body after reading from the Reader + apiResponse.Body = ioutil.NopCloser(bytes.NewReader(respBody)) + + resp.ResponseBody = respBody + } + + return resp, nil +} diff --git a/command/agent/cache/testing.go b/command/agent/cache/testing.go new file mode 100644 index 0000000000000..9ec637be4e0e5 --- /dev/null +++ b/command/agent/cache/testing.go @@ -0,0 +1,107 @@ +package cache + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "math/rand" + "net/http" + "strings" + "time" + + "github.com/hashicorp/vault/api" +) + +// mockProxier is a mock implementation of the Proxier interface, used for testing purposes. +// The mock will return the provided responses every time it reaches its Send method, up to +// the last provided response. This lets tests control what the next/underlying Proxier layer +// might expect to return. 
+type mockProxier struct { + proxiedResponses []*SendResponse + responseIndex int +} + +func newMockProxier(responses []*SendResponse) *mockProxier { + return &mockProxier{ + proxiedResponses: responses, + } +} + +func (p *mockProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + if p.responseIndex >= len(p.proxiedResponses) { + return nil, fmt.Errorf("index out of bounds: responseIndex = %d, responses = %d", p.responseIndex, len(p.proxiedResponses)) + } + resp := p.proxiedResponses[p.responseIndex] + + p.responseIndex++ + + return resp, nil +} + +func (p *mockProxier) ResponseIndex() int { + return p.responseIndex +} + +func newTestSendResponse(status int, body string) *SendResponse { + resp := &SendResponse{ + Response: &api.Response{ + Response: &http.Response{ + StatusCode: status, + Header: http.Header{}, + }, + }, + } + resp.Response.Header.Set("Date", time.Now().Format(http.TimeFormat)) + + if body != "" { + resp.Response.Body = ioutil.NopCloser(strings.NewReader(body)) + resp.ResponseBody = []byte(body) + } + + if json.Valid([]byte(body)) { + resp.Response.Header.Set("content-type", "application/json") + } + + return resp +} + +type mockTokenVerifierProxier struct { + currentToken string +} + +func (p *mockTokenVerifierProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + p.currentToken = req.Token + resp := newTestSendResponse(http.StatusOK, + `{"data": {"id": "`+p.currentToken+`"}}`) + + return resp, nil +} + +func (p *mockTokenVerifierProxier) GetCurrentRequestToken() string { + return p.currentToken +} + +type mockDelayProxier struct { + cacheableResp bool + delay int +} + +func (p *mockDelayProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + if p.delay > 0 { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(time.Duration(p.delay) * time.Millisecond): + } + } + + // If this is a cacheable response, we return a unique response every time + if 
p.cacheableResp { + rand.Seed(time.Now().Unix()) + s := fmt.Sprintf(`{"lease_id": "%d", "renewable": true, "data": {"foo": "bar"}}`, rand.Int()) + return newTestSendResponse(http.StatusOK, s), nil + } + + return newTestSendResponse(http.StatusOK, `{"value": "output"}`), nil +} diff --git a/command/agent/cache_end_to_end_test.go b/command/agent/cache_end_to_end_test.go index a2a359abc9bd9..4ad056a850ccf 100644 --- a/command/agent/cache_end_to_end_test.go +++ b/command/agent/cache_end_to_end_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package agent import ( @@ -17,13 +14,12 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - agentapprole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" - cache "github.com/hashicorp/vault/command/agentproxyshared/cache" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" - "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" - "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/command/agent/auth" + agentapprole "github.com/hashicorp/vault/command/agent/auth/approle" + "github.com/hashicorp/vault/command/agent/cache" + "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agent/sink/inmem" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" @@ -167,10 +163,8 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { // Create the API proxier apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ - Client: client, - Logger: cacheLogger.Named("apiproxy"), - UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, - 
UserAgentString: useragent.ProxyAPIProxyString(), + Client: client, + Logger: cacheLogger.Named("apiproxy"), }) if err != nil { t.Fatal(err) @@ -321,7 +315,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) // Passing a non-nil inmemsink tells the agent to use the auto-auth token - mux.Handle("/", cache.ProxyHandler(ctx, cacheLogger, leaseCache, inmemSink, true)) + mux.Handle("/", cache.Handler(ctx, cacheLogger, leaseCache, inmemSink, true)) server := &http.Server{ Handler: mux, ReadHeaderTimeout: 10 * time.Second, diff --git a/command/agent/cert_end_to_end_test.go b/command/agent/cert_end_to_end_test.go index 12ea933ed60e6..bacb188021cda 100644 --- a/command/agent/cert_end_to_end_test.go +++ b/command/agent/cert_end_to_end_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package agent import ( @@ -12,14 +9,16 @@ import ( "testing" "time" + "github.com/hashicorp/vault/builtin/logical/pki" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" vaultcert "github.com/hashicorp/vault/builtin/credential/cert" - "github.com/hashicorp/vault/builtin/logical/pki" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - agentcert "github.com/hashicorp/vault/command/agentproxyshared/auth/cert" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agent/auth" + agentcert "github.com/hashicorp/vault/command/agent/auth/cert" + "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" "github.com/hashicorp/vault/helper/dhutil" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/jsonutil" diff --git a/command/agent/cf_end_to_end_test.go b/command/agent/cf_end_to_end_test.go index e143223af1d96..6bc1fa8b6a077 100644 --- 
a/command/agent/cf_end_to_end_test.go +++ b/command/agent/cf_end_to_end_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package agent import ( @@ -15,10 +12,10 @@ import ( "github.com/hashicorp/vault-plugin-auth-cf/testing/certificates" cfAPI "github.com/hashicorp/vault-plugin-auth-cf/testing/cf" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - agentcf "github.com/hashicorp/vault/command/agentproxyshared/auth/cf" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agent/auth" + agentcf "github.com/hashicorp/vault/command/agent/auth/cf" + "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" diff --git a/command/agent/config/config.go b/command/agent/config/config.go index eea108b9fab3a..b32772564b3ca 100644 --- a/command/agent/config/config.go +++ b/command/agent/config/config.go @@ -1,56 +1,43 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package config import ( "context" "errors" "fmt" - "io" + "io/ioutil" "net" "os" - "path/filepath" "strings" - "syscall" "time" ctconfig "github.com/hashicorp/consul-template/config" - ctsignals "github.com/hashicorp/consul-template/signals" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/mitchellh/mapstructure" - "k8s.io/utils/strings/slices" - - "github.com/hashicorp/vault/command/agentproxyshared" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/vault/sdk/helper/pointerutil" + "github.com/mitchellh/mapstructure" ) -// Config is the configuration for Vault Agent. +// Config is the configuration for the vault server. type Config struct { *configutil.SharedConfig `hcl:"-"` AutoAuth *AutoAuth `hcl:"auto_auth"` ExitAfterAuth bool `hcl:"exit_after_auth"` Cache *Cache `hcl:"cache"` - APIProxy *APIProxy `hcl:"api_proxy"` Vault *Vault `hcl:"vault"` TemplateConfig *TemplateConfig `hcl:"template_config"` Templates []*ctconfig.TemplateConfig `hcl:"templates"` DisableIdleConns []string `hcl:"disable_idle_connections"` - DisableIdleConnsAPIProxy bool `hcl:"-"` + DisableIdleConnsCaching bool `hcl:"-"` DisableIdleConnsTemplating bool `hcl:"-"` DisableIdleConnsAutoAuth bool `hcl:"-"` DisableKeepAlives []string `hcl:"disable_keep_alives"` - DisableKeepAlivesAPIProxy bool `hcl:"-"` + DisableKeepAlivesCaching bool `hcl:"-"` DisableKeepAlivesTemplating bool `hcl:"-"` DisableKeepAlivesAutoAuth bool `hcl:"-"` - Exec *ExecConfig `hcl:"exec,optional"` - EnvTemplates []*ctconfig.TemplateConfig `hcl:"env_template,optional"` } const ( @@ -102,24 +89,24 @@ type transportDialer interface { DialContext(ctx context.Context, network, address string) (net.Conn, error) } -// APIProxy contains any configuration needed for proxy mode -type APIProxy struct { - 
UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"` - UseAutoAuthToken bool `hcl:"-"` - ForceAutoAuthToken bool `hcl:"-"` - EnforceConsistency string `hcl:"enforce_consistency"` - WhenInconsistent string `hcl:"when_inconsistent"` -} - // Cache contains any configuration needed for Cache mode type Cache struct { - UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"` - UseAutoAuthToken bool `hcl:"-"` - ForceAutoAuthToken bool `hcl:"-"` - EnforceConsistency string `hcl:"enforce_consistency"` - WhenInconsistent string `hcl:"when_inconsistent"` - Persist *agentproxyshared.PersistConfig `hcl:"persist"` - InProcDialer transportDialer `hcl:"-"` + UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"` + UseAutoAuthToken bool `hcl:"-"` + ForceAutoAuthToken bool `hcl:"-"` + EnforceConsistency string `hcl:"enforce_consistency"` + WhenInconsistent string `hcl:"when_inconsistent"` + Persist *Persist `hcl:"persist"` + InProcDialer transportDialer `hcl:"-"` +} + +// Persist contains configuration needed for persistent caching +type Persist struct { + Type string + Path string `hcl:"path"` + KeepAfterImport bool `hcl:"keep_after_import"` + ExitOnErr bool `hcl:"exit_on_err"` + ServiceAccountTokenFile string `hcl:"service_account_token_file"` } // AutoAuth is the configured authentication method and sinks @@ -167,416 +154,26 @@ type TemplateConfig struct { StaticSecretRenderInt time.Duration `hcl:"-"` } -type ExecConfig struct { - Command []string `hcl:"command,attr" mapstructure:"command"` - RestartOnSecretChanges string `hcl:"restart_on_secret_changes,optional" mapstructure:"restart_on_secret_changes"` - RestartStopSignal os.Signal `hcl:"-" mapstructure:"restart_stop_signal"` -} - func NewConfig() *Config { return &Config{ SharedConfig: new(configutil.SharedConfig), } } -// Merge merges two Agent configurations. 
-func (c *Config) Merge(c2 *Config) *Config { - if c2 == nil { - return c - } - - result := NewConfig() - - result.SharedConfig = c.SharedConfig - if c2.SharedConfig != nil { - result.SharedConfig = c.SharedConfig.Merge(c2.SharedConfig) - } - - result.AutoAuth = c.AutoAuth - if c2.AutoAuth != nil { - result.AutoAuth = c2.AutoAuth - } - - result.Cache = c.Cache - if c2.Cache != nil { - result.Cache = c2.Cache - } - - result.APIProxy = c.APIProxy - if c2.APIProxy != nil { - result.APIProxy = c2.APIProxy - } - - result.DisableMlock = c.DisableMlock - if c2.DisableMlock { - result.DisableMlock = c2.DisableMlock - } - - // For these, ignore the non-specific one and overwrite them all - result.DisableIdleConnsAutoAuth = c.DisableIdleConnsAutoAuth - if c2.DisableIdleConnsAutoAuth { - result.DisableIdleConnsAutoAuth = c2.DisableIdleConnsAutoAuth - } - - result.DisableIdleConnsAPIProxy = c.DisableIdleConnsAPIProxy - if c2.DisableIdleConnsAPIProxy { - result.DisableIdleConnsAPIProxy = c2.DisableIdleConnsAPIProxy - } - - result.DisableIdleConnsTemplating = c.DisableIdleConnsTemplating - if c2.DisableIdleConnsTemplating { - result.DisableIdleConnsTemplating = c2.DisableIdleConnsTemplating - } - - result.DisableKeepAlivesAutoAuth = c.DisableKeepAlivesAutoAuth - if c2.DisableKeepAlivesAutoAuth { - result.DisableKeepAlivesAutoAuth = c2.DisableKeepAlivesAutoAuth - } - - result.DisableKeepAlivesAPIProxy = c.DisableKeepAlivesAPIProxy - if c2.DisableKeepAlivesAPIProxy { - result.DisableKeepAlivesAPIProxy = c2.DisableKeepAlivesAPIProxy - } - - result.DisableKeepAlivesTemplating = c.DisableKeepAlivesTemplating - if c2.DisableKeepAlivesTemplating { - result.DisableKeepAlivesTemplating = c2.DisableKeepAlivesTemplating - } - - result.TemplateConfig = c.TemplateConfig - if c2.TemplateConfig != nil { - result.TemplateConfig = c2.TemplateConfig - } - - for _, l := range c.Templates { - result.Templates = append(result.Templates, l) - } - for _, l := range c2.Templates { - result.Templates = 
append(result.Templates, l) - } - - result.ExitAfterAuth = c.ExitAfterAuth - if c2.ExitAfterAuth { - result.ExitAfterAuth = c2.ExitAfterAuth - } - - result.Vault = c.Vault - if c2.Vault != nil { - result.Vault = c2.Vault - } - - result.PidFile = c.PidFile - if c2.PidFile != "" { - result.PidFile = c2.PidFile - } - - result.Exec = c.Exec - if c2.Exec != nil { - result.Exec = c2.Exec - } - - for _, envTmpl := range c.EnvTemplates { - result.EnvTemplates = append(result.EnvTemplates, envTmpl) - } - - for _, envTmpl := range c2.EnvTemplates { - result.EnvTemplates = append(result.EnvTemplates, envTmpl) - } - - return result -} - -// IsDefaultListerDefined returns true if a default listener has been defined -// in this config -func (c *Config) IsDefaultListerDefined() bool { - for _, l := range c.Listeners { - if l.Role != "metrics_only" { - return true - } - } - return false -} - -// ValidateConfig validates an Agent configuration after it has been fully merged together, to -// ensure that required combinations of configs are there -func (c *Config) ValidateConfig() error { - if c.APIProxy != nil && c.Cache != nil { - if c.Cache.UseAutoAuthTokenRaw != nil { - if c.APIProxy.UseAutoAuthTokenRaw != nil { - return fmt.Errorf("use_auto_auth_token defined in both api_proxy and cache config. 
Please remove this configuration from the cache block") - } else { - c.APIProxy.ForceAutoAuthToken = c.Cache.ForceAutoAuthToken - } - } - } - - if c.Cache != nil { - if len(c.Listeners) < 1 && len(c.Templates) < 1 && len(c.EnvTemplates) < 1 { - return fmt.Errorf("enabling the cache requires at least 1 template or 1 listener to be defined") - } - - if c.Cache.UseAutoAuthToken { - if c.AutoAuth == nil { - return fmt.Errorf("cache.use_auto_auth_token is true but auto_auth not configured") - } - if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { - return fmt.Errorf("cache.use_auto_auth_token is true and auto_auth uses wrapping") - } - } - } - - if c.APIProxy != nil { - if len(c.Listeners) < 1 { - return fmt.Errorf("configuring the api_proxy requires at least 1 listener to be defined") - } - - if c.APIProxy.UseAutoAuthToken { - if c.AutoAuth == nil { - return fmt.Errorf("api_proxy.use_auto_auth_token is true but auto_auth not configured") - } - if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { - return fmt.Errorf("api_proxy.use_auto_auth_token is true and auto_auth uses wrapping") - } - } - } - - if c.AutoAuth != nil { - if len(c.AutoAuth.Sinks) == 0 && - (c.APIProxy == nil || !c.APIProxy.UseAutoAuthToken) && - len(c.Templates) == 0 && - len(c.EnvTemplates) == 0 { - return fmt.Errorf("auto_auth requires at least one sink or at least one template or api_proxy.use_auto_auth_token=true") - } - } - - if c.AutoAuth == nil && c.Cache == nil && len(c.Listeners) == 0 { - return fmt.Errorf("no auto_auth, cache, or listener block found in config") - } - - return c.validateEnvTemplateConfig() -} - -func (c *Config) validateEnvTemplateConfig() error { - // if we are not in env-template mode, exit early - if c.Exec == nil && len(c.EnvTemplates) == 0 { - return nil - } - - if c.Exec == nil { - return fmt.Errorf("a top-level 'exec' element must be specified with 'env_template' entries") - } - - if len(c.EnvTemplates) 
== 0 { - return fmt.Errorf("must specify at least one 'env_template' element with a top-level 'exec' element") - } - - if c.APIProxy != nil { - return fmt.Errorf("'api_proxy' cannot be specified with 'env_template' entries") - } - - if len(c.Templates) > 0 { - return fmt.Errorf("'template' cannot be specified with 'env_template' entries") - } - - if len(c.Exec.Command) == 0 { - return fmt.Errorf("'exec' requires a non-empty 'command' field") - } - - if !slices.Contains([]string{"always", "never"}, c.Exec.RestartOnSecretChanges) { - return fmt.Errorf("'exec.restart_on_secret_changes' unexpected value: %q", c.Exec.RestartOnSecretChanges) - } - - uniqueKeys := make(map[string]struct{}) - - for _, template := range c.EnvTemplates { - // Required: - // - the key (environment variable name) - // - either "contents" or "source" - // Optional / permitted: - // - error_on_missing_key - // - error_fatal - // - left_delimiter - // - right_delimiter - // - ExtFuncMap - // - function_denylist / function_blacklist - - if template.MapToEnvironmentVariable == nil { - return fmt.Errorf("env_template: an environment variable name is required") - } - - key := *template.MapToEnvironmentVariable - - if _, exists := uniqueKeys[key]; exists { - return fmt.Errorf("env_template: duplicate environment variable name: %q", key) - } - - uniqueKeys[key] = struct{}{} - - if template.Contents == nil && template.Source == nil { - return fmt.Errorf("env_template[%s]: either 'contents' or 'source' must be specified", key) - } - - if template.Contents != nil && template.Source != nil { - return fmt.Errorf("env_template[%s]: 'contents' and 'source' cannot be specified together", key) - } - - if template.Backup != nil { - return fmt.Errorf("env_template[%s]: 'backup' is not allowed", key) - } - - if template.Command != nil { - return fmt.Errorf("env_template[%s]: 'command' is not allowed", key) - } - - if template.CommandTimeout != nil { - return fmt.Errorf("env_template[%s]: 'command_timeout' is not 
allowed", key) - } - - if template.CreateDestDirs != nil { - return fmt.Errorf("env_template[%s]: 'create_dest_dirs' is not allowed", key) - } - - if template.Destination != nil { - return fmt.Errorf("env_template[%s]: 'destination' is not allowed", key) - } - - if template.Exec != nil { - return fmt.Errorf("env_template[%s]: 'exec' is not allowed", key) - } - - if template.Perms != nil { - return fmt.Errorf("env_template[%s]: 'perms' is not allowed", key) - } - - if template.User != nil { - return fmt.Errorf("env_template[%s]: 'user' is not allowed", key) - } - - if template.Uid != nil { - return fmt.Errorf("env_template[%s]: 'uid' is not allowed", key) - } - - if template.Group != nil { - return fmt.Errorf("env_template[%s]: 'group' is not allowed", key) - } - - if template.Gid != nil { - return fmt.Errorf("env_template[%s]: 'gid' is not allowed", key) - } - - if template.Wait != nil { - return fmt.Errorf("env_template[%s]: 'wait' is not allowed", key) - } - - if template.SandboxPath != nil { - return fmt.Errorf("env_template[%s]: 'sandbox_path' is not allowed", key) - } - } - - return nil -} - // LoadConfig loads the configuration at the given path, regardless if -// it's a file or directory. +// its a file or directory. 
func LoadConfig(path string) (*Config, error) { fi, err := os.Stat(path) if err != nil { return nil, err } - if fi.IsDir() { - return LoadConfigDir(path) - } - return LoadConfigFile(path) -} - -// LoadConfigDir loads the configuration at the given path if it's a directory -func LoadConfigDir(dir string) (*Config, error) { - f, err := os.Open(dir) - if err != nil { - return nil, err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return nil, err - } - if !fi.IsDir() { - return nil, fmt.Errorf("configuration path must be a directory: %q", dir) - } - - var files []string - err = nil - for err != io.EOF { - var fis []os.FileInfo - fis, err = f.Readdir(128) - if err != nil && err != io.EOF { - return nil, err - } - - for _, fi := range fis { - // Ignore directories - if fi.IsDir() { - continue - } - - // Only care about files that are valid to load. - name := fi.Name() - skip := true - if strings.HasSuffix(name, ".hcl") { - skip = false - } else if strings.HasSuffix(name, ".json") { - skip = false - } - if skip || isTemporaryFile(name) { - continue - } - - path := filepath.Join(dir, name) - files = append(files, path) - } - } - - result := NewConfig() - for _, f := range files { - config, err := LoadConfigFile(f) - if err != nil { - return nil, fmt.Errorf("error loading %q: %w", f, err) - } - - if result == nil { - result = config - } else { - result = result.Merge(config) - } - } - - return result, nil -} - -// isTemporaryFile returns true or false depending on whether the -// provided file name is a temporary file for the following editors: -// emacs or vim. 
-func isTemporaryFile(name string) bool { - return strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, ".#") || // emacs - (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs -} - -// LoadConfigFile loads the configuration at the given path if it's a file -func LoadConfigFile(path string) (*Config, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - if fi.IsDir() { return nil, fmt.Errorf("location is a directory, not a file") } // Read the file - d, err := os.ReadFile(path) + d, err := ioutil.ReadFile(path) if err != nil { return nil, err } @@ -626,10 +223,6 @@ func LoadConfigFile(path string) (*Config, error) { return nil, fmt.Errorf("error parsing 'cache':%w", err) } - if err := parseAPIProxy(result, list); err != nil { - return nil, fmt.Errorf("error parsing 'api_proxy':%w", err) - } - if err := parseTemplateConfig(result, list); err != nil { return nil, fmt.Errorf("error parsing 'template_config': %w", err) } @@ -638,18 +231,26 @@ func LoadConfigFile(path string) (*Config, error) { return nil, fmt.Errorf("error parsing 'template': %w", err) } - if err := parseExec(result, list); err != nil { - return nil, fmt.Errorf("error parsing 'exec': %w", err) - } + if result.Cache != nil { + if len(result.Listeners) < 1 && len(result.Templates) < 1 { + return nil, fmt.Errorf("enabling the cache requires at least 1 template or 1 listener to be defined") + } - if err := parseEnvTemplates(result, list); err != nil { - return nil, fmt.Errorf("error parsing 'env_template': %w", err) + if result.Cache.UseAutoAuthToken { + if result.AutoAuth == nil { + return nil, fmt.Errorf("cache.use_auto_auth_token is true but auto_auth not configured") + } + if result.AutoAuth.Method.WrapTTL > 0 { + return nil, fmt.Errorf("cache.use_auto_auth_token is true and auto_auth uses wrapping") + } + } } - if result.Cache != nil && result.APIProxy == nil && (result.Cache.UseAutoAuthToken || result.Cache.ForceAutoAuthToken) { - result.APIProxy 
= &APIProxy{ - UseAutoAuthToken: result.Cache.UseAutoAuthToken, - ForceAutoAuthToken: result.Cache.ForceAutoAuthToken, + if result.AutoAuth != nil { + if len(result.AutoAuth.Sinks) == 0 && + (result.Cache == nil || !result.Cache.UseAutoAuthToken) && + len(result.Templates) == 0 { + return nil, fmt.Errorf("auto_auth requires at least one sink or at least one template or cache.use_auto_auth_token=true") } } @@ -658,17 +259,19 @@ func LoadConfigFile(path string) (*Config, error) { return nil, fmt.Errorf("error parsing 'vault':%w", err) } - if result.Vault != nil { - // Set defaults - if result.Vault.Retry == nil { - result.Vault.Retry = &Retry{} - } - switch result.Vault.Retry.NumRetries { - case 0: - result.Vault.Retry.NumRetries = ctconfig.DefaultRetryAttempts - case -1: - result.Vault.Retry.NumRetries = 0 - } + if result.Vault == nil { + result.Vault = &Vault{} + } + + // Set defaults + if result.Vault.Retry == nil { + result.Vault.Retry = &Retry{} + } + switch result.Vault.Retry.NumRetries { + case 0: + result.Vault.Retry.NumRetries = ctconfig.DefaultRetryAttempts + case -1: + result.Vault.Retry.NumRetries = 0 } if disableIdleConnsEnv := os.Getenv(DisableIdleConnsEnv); disableIdleConnsEnv != "" { @@ -682,8 +285,8 @@ func LoadConfigFile(path string) (*Config, error) { switch subsystem { case "auto-auth": result.DisableIdleConnsAutoAuth = true - case "caching", "proxying": - result.DisableIdleConnsAPIProxy = true + case "caching": + result.DisableIdleConnsCaching = true case "templating": result.DisableIdleConnsTemplating = true case "": @@ -704,8 +307,8 @@ func LoadConfigFile(path string) (*Config, error) { switch subsystem { case "auto-auth": result.DisableKeepAlivesAutoAuth = true - case "caching", "proxying": - result.DisableKeepAlivesAPIProxy = true + case "caching": + result.DisableKeepAlivesCaching = true case "templating": result.DisableKeepAlivesTemplating = true case "": @@ -784,50 +387,6 @@ func parseRetry(result *Config, list *ast.ObjectList) error { 
return nil } -func parseAPIProxy(result *Config, list *ast.ObjectList) error { - name := "api_proxy" - - apiProxyList := list.Filter(name) - if len(apiProxyList.Items) == 0 { - return nil - } - - if len(apiProxyList.Items) > 1 { - return fmt.Errorf("one and only one %q block is required", name) - } - - item := apiProxyList.Items[0] - - var apiProxy APIProxy - err := hcl.DecodeObject(&apiProxy, item.Val) - if err != nil { - return err - } - - if apiProxy.UseAutoAuthTokenRaw != nil { - apiProxy.UseAutoAuthToken, err = parseutil.ParseBool(apiProxy.UseAutoAuthTokenRaw) - if err != nil { - // Could be a value of "force" instead of "true"/"false" - switch apiProxy.UseAutoAuthTokenRaw.(type) { - case string: - v := apiProxy.UseAutoAuthTokenRaw.(string) - - if !strings.EqualFold(v, "force") { - return fmt.Errorf("value of 'use_auto_auth_token' can be either true/false/force, %q is an invalid option", apiProxy.UseAutoAuthTokenRaw) - } - apiProxy.UseAutoAuthToken = true - apiProxy.ForceAutoAuthToken = true - - default: - return err - } - } - } - result.APIProxy = &apiProxy - - return nil -} - func parseCache(result *Config, list *ast.ObjectList) error { name := "cache" @@ -895,7 +454,7 @@ func parsePersist(result *Config, list *ast.ObjectList) error { item := persistList.Items[0] - var p agentproxyshared.PersistConfig + var p Persist err := hcl.DecodeObject(&p, item.Val) if err != nil { return err @@ -1193,121 +752,3 @@ func parseTemplates(result *Config, list *ast.ObjectList) error { result.Templates = tcs return nil } - -func parseExec(result *Config, list *ast.ObjectList) error { - name := "exec" - - execList := list.Filter(name) - if len(execList.Items) == 0 { - return nil - } - - if len(execList.Items) > 1 { - return fmt.Errorf("at most one %q block is allowed", name) - } - - item := execList.Items[0] - var shadow interface{} - if err := hcl.DecodeObject(&shadow, item.Val); err != nil { - return fmt.Errorf("error decoding config: %s", err) - } - - parsed, ok := 
shadow.(map[string]interface{}) - if !ok { - return errors.New("error converting config") - } - - var execConfig ExecConfig - var md mapstructure.Metadata - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - DecodeHook: mapstructure.ComposeDecodeHookFunc( - ctconfig.StringToFileModeFunc(), - ctconfig.StringToWaitDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - mapstructure.StringToTimeDurationHookFunc(), - ctsignals.StringToSignalFunc(), - ), - ErrorUnused: true, - Metadata: &md, - Result: &execConfig, - }) - if err != nil { - return errors.New("mapstructure decoder creation failed") - } - if err := decoder.Decode(parsed); err != nil { - return err - } - - // if the user does not specify a restart signal, default to SIGTERM - if execConfig.RestartStopSignal == nil { - execConfig.RestartStopSignal = syscall.SIGTERM - } - - if execConfig.RestartOnSecretChanges == "" { - execConfig.RestartOnSecretChanges = "always" - } - - result.Exec = &execConfig - return nil -} - -func parseEnvTemplates(result *Config, list *ast.ObjectList) error { - name := "env_template" - - envTemplateList := list.Filter(name) - - if len(envTemplateList.Items) < 1 { - return nil - } - - envTemplates := make([]*ctconfig.TemplateConfig, 0, len(envTemplateList.Items)) - - for _, item := range envTemplateList.Items { - var shadow interface{} - if err := hcl.DecodeObject(&shadow, item.Val); err != nil { - return fmt.Errorf("error decoding config: %s", err) - } - - // Convert to a map and flatten the keys we want to flatten - parsed, ok := shadow.(map[string]any) - if !ok { - return errors.New("error converting config") - } - - var templateConfig ctconfig.TemplateConfig - var md mapstructure.Metadata - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - DecodeHook: mapstructure.ComposeDecodeHookFunc( - ctconfig.StringToFileModeFunc(), - ctconfig.StringToWaitDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - 
mapstructure.StringToTimeDurationHookFunc(), - ctsignals.StringToSignalFunc(), - ), - ErrorUnused: true, - Metadata: &md, - Result: &templateConfig, - }) - if err != nil { - return errors.New("mapstructure decoder creation failed") - } - if err := decoder.Decode(parsed); err != nil { - return err - } - - // parse the keys in the item for the environment variable name - if numberOfKeys := len(item.Keys); numberOfKeys != 1 { - return fmt.Errorf("expected one and only one environment variable name, got %d", numberOfKeys) - } - - // hcl parses this with extra quotes if quoted in config file - environmentVariableName := strings.Trim(item.Keys[0].Token.Text, `"`) - - templateConfig.MapToEnvironmentVariable = pointerutil.StringPtr(environmentVariableName) - - envTemplates = append(envTemplates, &templateConfig) - } - - result.EnvTemplates = envTemplates - return nil -} diff --git a/command/agent/config/config_test.go b/command/agent/config/config_test.go index 3be1ab33ab3e5..8a5491891ff68 100644 --- a/command/agent/config/config_test.go +++ b/command/agent/config/config_test.go @@ -1,25 +1,18 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package config import ( "os" - "syscall" "testing" "time" "github.com/go-test/deep" ctconfig "github.com/hashicorp/consul-template/config" - "golang.org/x/exp/slices" - - "github.com/hashicorp/vault/command/agentproxyshared" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/pointerutil" ) func TestLoadConfigFile_AgentCache(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-cache.hcl") + config, err := LoadConfig("./test-fixtures/config-cache.hcl") if err != nil { t.Fatal(err) } @@ -41,15 +34,8 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { Address: "127.0.0.1:8300", TLSDisable: true, }, - { - Type: "tcp", - Address: "127.0.0.1:3000", - Role: "metrics_only", - TLSDisable: true, - }, { Type: "tcp", - Role: "default", Address: "127.0.0.1:8400", TLSKeyFile: "/path/to/cakey.pem", TLSCertFile: "/path/to/cacert.pem", @@ -76,15 +62,11 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { }, }, }, - APIProxy: &APIProxy{ - UseAutoAuthToken: true, - ForceAutoAuthToken: false, - }, Cache: &Cache{ UseAutoAuthToken: true, UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, - Persist: &agentproxyshared.PersistConfig{ + Persist: &Persist{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: true, @@ -111,7 +93,7 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { t.Fatal(diff) } - config, err = LoadConfigFile("./test-fixtures/config-cache-embedded-type.hcl") + config, err = LoadConfig("./test-fixtures/config-cache-embedded-type.hcl") if err != nil { t.Fatal(err) } @@ -123,236 +105,8 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { } } -func TestLoadConfigDir_AgentCache(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-dir-cache/") - if err != nil { - t.Fatal(err) - } - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - PidFile: "./pidfile", - Listeners: []*configutil.Listener{ - { - Type: "unix", - Address: 
"/path/to/socket", - TLSDisable: true, - SocketMode: "configmode", - SocketUser: "configuser", - SocketGroup: "configgroup", - }, - { - Type: "tcp", - Address: "127.0.0.1:8300", - TLSDisable: true, - }, - { - Type: "tcp", - Address: "127.0.0.1:3000", - Role: "metrics_only", - TLSDisable: true, - }, - { - Type: "tcp", - Role: "default", - Address: "127.0.0.1:8400", - TLSKeyFile: "/path/to/cakey.pem", - TLSCertFile: "/path/to/cacert.pem", - }, - }, - }, - AutoAuth: &AutoAuth{ - Method: &Method{ - Type: "aws", - MountPath: "auth/aws", - Config: map[string]interface{}{ - "role": "foobar", - }, - }, - Sinks: []*Sink{ - { - Type: "file", - DHType: "curve25519", - DHPath: "/tmp/file-foo-dhpath", - AAD: "foobar", - Config: map[string]interface{}{ - "path": "/tmp/file-foo", - }, - }, - }, - }, - APIProxy: &APIProxy{ - UseAutoAuthToken: true, - ForceAutoAuthToken: false, - }, - Cache: &Cache{ - UseAutoAuthToken: true, - UseAutoAuthTokenRaw: true, - ForceAutoAuthToken: false, - Persist: &agentproxyshared.PersistConfig{ - Type: "kubernetes", - Path: "/vault/agent-cache/", - KeepAfterImport: true, - ExitOnErr: true, - ServiceAccountTokenFile: "/tmp/serviceaccount/token", - }, - }, - Vault: &Vault{ - Address: "http://127.0.0.1:1111", - CACert: "config_ca_cert", - CAPath: "config_ca_path", - TLSSkipVerifyRaw: interface{}("true"), - TLSSkipVerify: true, - ClientCert: "config_client_cert", - ClientKey: "config_client_key", - Retry: &Retry{ - NumRetries: 12, - }, - }, - } - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } - - config, err = LoadConfigFile("./test-fixtures/config-dir-cache/config-cache1.hcl") - if err != nil { - t.Fatal(err) - } - config2, err := LoadConfigFile("./test-fixtures/config-dir-cache/config-cache2.hcl") - - mergedConfig := config.Merge(config2) - - mergedConfig.Prune() - if diff := deep.Equal(mergedConfig, expected); diff != nil { - t.Fatal(diff) - } -} - -func TestLoadConfigDir_AutoAuthAndListener(t *testing.T) 
{ - config, err := LoadConfig("./test-fixtures/config-dir-auto-auth-and-listener/") - if err != nil { - t.Fatal(err) - } - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - PidFile: "./pidfile", - Listeners: []*configutil.Listener{ - { - Type: "tcp", - Address: "127.0.0.1:8300", - TLSDisable: true, - }, - }, - }, - AutoAuth: &AutoAuth{ - Method: &Method{ - Type: "aws", - MountPath: "auth/aws", - Config: map[string]interface{}{ - "role": "foobar", - }, - }, - Sinks: []*Sink{ - { - Type: "file", - DHType: "curve25519", - DHPath: "/tmp/file-foo-dhpath", - AAD: "foobar", - Config: map[string]interface{}{ - "path": "/tmp/file-foo", - }, - }, - }, - }, - } - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } - - config, err = LoadConfigFile("./test-fixtures/config-dir-auto-auth-and-listener/config1.hcl") - if err != nil { - t.Fatal(err) - } - config2, err := LoadConfigFile("./test-fixtures/config-dir-auto-auth-and-listener/config2.hcl") - - mergedConfig := config.Merge(config2) - - mergedConfig.Prune() - if diff := deep.Equal(mergedConfig, expected); diff != nil { - t.Fatal(diff) - } -} - -func TestLoadConfigDir_VaultBlock(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-dir-vault-block/") - if err != nil { - t.Fatal(err) - } - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - PidFile: "./pidfile", - }, - Vault: &Vault{ - Address: "http://127.0.0.1:1111", - CACert: "config_ca_cert", - CAPath: "config_ca_path", - TLSSkipVerifyRaw: interface{}("true"), - TLSSkipVerify: true, - ClientCert: "config_client_cert", - ClientKey: "config_client_key", - Retry: &Retry{ - NumRetries: 12, - }, - }, - AutoAuth: &AutoAuth{ - Method: &Method{ - Type: "aws", - MountPath: "auth/aws", - Config: map[string]interface{}{ - "role": "foobar", - }, - }, - Sinks: []*Sink{ - { - Type: "file", - DHType: "curve25519", - DHPath: "/tmp/file-foo-dhpath", - AAD: "foobar", - Config: map[string]interface{}{ - 
"path": "/tmp/file-foo", - }, - }, - }, - }, - } - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } - - config, err = LoadConfigFile("./test-fixtures/config-dir-vault-block/config1.hcl") - if err != nil { - t.Fatal(err) - } - config2, err := LoadConfigFile("./test-fixtures/config-dir-vault-block/config2.hcl") - - mergedConfig := config.Merge(config2) - - mergedConfig.Prune() - if diff := deep.Equal(mergedConfig, expected); diff != nil { - t.Fatal(diff) - } -} - func TestLoadConfigFile_AgentCache_NoListeners(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-cache-no-listeners.hcl") + config, err := LoadConfig("./test-fixtures/config-cache-no-listeners.hcl") if err != nil { t.Fatal(err) } @@ -381,15 +135,11 @@ func TestLoadConfigFile_AgentCache_NoListeners(t *testing.T) { }, }, }, - APIProxy: &APIProxy{ - UseAutoAuthToken: true, - ForceAutoAuthToken: false, - }, Cache: &Cache{ UseAutoAuthToken: true, UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, - Persist: &agentproxyshared.PersistConfig{ + Persist: &Persist{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: true, @@ -433,7 +183,7 @@ func TestLoadConfigFile(t *testing.T) { } }() - config, err := LoadConfigFile("./test-fixtures/config.hcl") + config, err := LoadConfig("./test-fixtures/config.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -441,7 +191,6 @@ func TestLoadConfigFile(t *testing.T) { expected := &Config{ SharedConfig: &configutil.SharedConfig{ PidFile: "./pidfile", - LogFile: "/var/log/vault/vault-agent.log", }, AutoAuth: &AutoAuth{ Method: &Method{ @@ -476,6 +225,11 @@ func TestLoadConfigFile(t *testing.T) { }, }, }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -483,7 +237,7 @@ func TestLoadConfigFile(t *testing.T) { t.Fatal(diff) } - config, err = LoadConfigFile("./test-fixtures/config-embedded-type.hcl") + config, err = LoadConfig("./test-fixtures/config-embedded-type.hcl") 
if err != nil { t.Fatalf("err: %s", err) } @@ -495,7 +249,7 @@ func TestLoadConfigFile(t *testing.T) { } func TestLoadConfigFile_Method_Wrapping(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-method-wrapping.hcl") + config, err := LoadConfig("./test-fixtures/config-method-wrapping.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -524,6 +278,11 @@ func TestLoadConfigFile_Method_Wrapping(t *testing.T) { }, }, }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -533,7 +292,7 @@ func TestLoadConfigFile_Method_Wrapping(t *testing.T) { } func TestLoadConfigFile_Method_InitialBackoff(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-method-initial-backoff.hcl") + config, err := LoadConfig("./test-fixtures/config-method-initial-backoff.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -563,6 +322,11 @@ func TestLoadConfigFile_Method_InitialBackoff(t *testing.T) { }, }, }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -572,7 +336,7 @@ func TestLoadConfigFile_Method_InitialBackoff(t *testing.T) { } func TestLoadConfigFile_Method_ExitOnErr(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-method-exit-on-err.hcl") + config, err := LoadConfig("./test-fixtures/config-method-exit-on-err.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -602,6 +366,11 @@ func TestLoadConfigFile_Method_ExitOnErr(t *testing.T) { }, }, }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -611,7 +380,7 @@ func TestLoadConfigFile_Method_ExitOnErr(t *testing.T) { } func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-cache-no-auto_auth.hcl") + config, err := LoadConfig("./test-fixtures/config-cache-no-auto_auth.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -628,6 +397,11 @@ func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) { }, }, }, + Vault: &Vault{ 
+ Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -637,98 +411,56 @@ func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) { } func TestLoadConfigFile_Bad_AgentCache_InconsisentAutoAuth(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl") - if err != nil { - t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) - } - if config == nil { - t.Fatal("config was nil") - } - err = config.ValidateConfig() + _, err := LoadConfig("./test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl") if err == nil { - t.Fatal("ValidateConfig should return an error when use_auto_auth_token=true and no auto_auth section present") + t.Fatal("LoadConfig should return an error when use_auto_auth_token=true and no auto_auth section present") } } func TestLoadConfigFile_Bad_AgentCache_ForceAutoAuthNoMethod(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/bad-config-cache-force-token-no-auth-method.hcl") - if err != nil { - t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) - } - if config == nil { - t.Fatal("config was nil") - } - err = config.ValidateConfig() + _, err := LoadConfig("./test-fixtures/bad-config-cache-force-auto_auth.hcl") if err == nil { - t.Fatal("ValidateConfig should return an error when use_auto_auth_token=force and no auto_auth section present") + t.Fatal("LoadConfig should return an error when use_auto_auth_token=force and no auto_auth section present") } } func TestLoadConfigFile_Bad_AgentCache_NoListeners(t *testing.T) { - _, err := LoadConfigFile("./test-fixtures/bad-config-cache-no-listeners.hcl") - if err != nil { - t.Fatalf("LoadConfigFile should return an error for this config") + _, err := LoadConfig("./test-fixtures/bad-config-cache-no-listeners.hcl") + if err == nil { + t.Fatal("LoadConfig should return an error when cache section present and no listeners present and no templates defined") } } func 
TestLoadConfigFile_Bad_AutoAuth_Wrapped_Multiple_Sinks(t *testing.T) { - _, err := LoadConfigFile("./test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl") + _, err := LoadConfig("./test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks") if err == nil { - t.Fatalf("LoadConfigFile should return an error for this config, err: %v", err) + t.Fatal("LoadConfig should return an error when auth_auth.method.wrap_ttl nonzero and multiple sinks defined") } } func TestLoadConfigFile_Bad_AutoAuth_Nosinks_Nocache_Notemplates(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl") - if err != nil { - t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) - } - if config == nil { - t.Fatal("config was nil") - } - err = config.ValidateConfig() + _, err := LoadConfig("./test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl") if err == nil { - t.Fatal("ValidateConfig should return an error when auto_auth configured and there are no sinks, caches or templates") + t.Fatal("LoadConfig should return an error when auto_auth configured and there are no sinks, caches or templates") } } func TestLoadConfigFile_Bad_AutoAuth_Both_Wrapping_Types(t *testing.T) { - _, err := LoadConfigFile("./test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl") + _, err := LoadConfig("./test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl") if err == nil { - t.Fatalf("LoadConfigFile should return an error for this config") + t.Fatal("LoadConfig should return an error when auth_auth.method.wrap_ttl nonzero and sinks.wrap_ttl nonzero") } } func TestLoadConfigFile_Bad_AgentCache_AutoAuth_Method_wrapping(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl") - if err != nil { - t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) - } - if config == nil { - t.Fatal("config was nil") - } - 
err = config.ValidateConfig() - if err == nil { - t.Fatal("ValidateConfig should return an error when auth_auth.method.wrap_ttl nonzero and cache.use_auto_auth_token=true") - } -} - -func TestLoadConfigFile_Bad_APIProxy_And_Cache_Same_Config(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/bad-config-api_proxy-cache.hcl") - if err != nil { - t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) - } - if config == nil { - t.Fatal("config was nil") - } - err = config.ValidateConfig() + _, err := LoadConfig("./test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl") if err == nil { - t.Fatal("ValidateConfig should return an error when cache and api_proxy try and configure the same value") + t.Fatal("LoadConfig should return an error when auth_auth.method.wrap_ttl nonzero and cache.use_auto_auth_token=true") } } func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-no-sink.hcl") + config, err := LoadConfig("./test-fixtures/config-cache-auto_auth-no-sink.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -753,15 +485,16 @@ func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) { }, }, }, - APIProxy: &APIProxy{ - UseAutoAuthToken: true, - ForceAutoAuthToken: false, - }, Cache: &Cache{ UseAutoAuthToken: true, UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -771,7 +504,7 @@ func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) { } func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-force.hcl") + config, err := LoadConfig("./test-fixtures/config-cache-auto_auth-force.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -796,15 +529,16 @@ func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) { }, }, }, - APIProxy: &APIProxy{ - 
UseAutoAuthToken: true, - ForceAutoAuthToken: true, - }, Cache: &Cache{ UseAutoAuthToken: true, UseAutoAuthTokenRaw: "force", ForceAutoAuthToken: true, }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -814,7 +548,7 @@ func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) { } func TestLoadConfigFile_AgentCache_AutoAuth_True(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-true.hcl") + config, err := LoadConfig("./test-fixtures/config-cache-auto_auth-true.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -839,56 +573,16 @@ func TestLoadConfigFile_AgentCache_AutoAuth_True(t *testing.T) { }, }, }, - APIProxy: &APIProxy{ - UseAutoAuthToken: true, - ForceAutoAuthToken: false, - }, Cache: &Cache{ UseAutoAuthToken: true, UseAutoAuthTokenRaw: "true", ForceAutoAuthToken: false, }, - } - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } -} - -func TestLoadConfigFile_Agent_AutoAuth_APIProxyAllConfig(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl") - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - Listeners: []*configutil.Listener{ - { - Type: "tcp", - Address: "127.0.0.1:8300", - TLSDisable: true, - }, - }, - PidFile: "./pidfile", - }, - AutoAuth: &AutoAuth{ - Method: &Method{ - Type: "aws", - MountPath: "auth/aws", - Config: map[string]interface{}{ - "role": "foobar", - }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, }, }, - APIProxy: &APIProxy{ - UseAutoAuthToken: true, - UseAutoAuthTokenRaw: "force", - ForceAutoAuthToken: true, - EnforceConsistency: "always", - WhenInconsistent: "forward", - }, } config.Prune() @@ -898,7 +592,7 @@ func TestLoadConfigFile_Agent_AutoAuth_APIProxyAllConfig(t *testing.T) { } func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) { - config, err := 
LoadConfigFile("./test-fixtures/config-cache-auto_auth-false.hcl") + config, err := LoadConfig("./test-fixtures/config-cache-auto_auth-false.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -939,6 +633,11 @@ func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) { UseAutoAuthTokenRaw: "false", ForceAutoAuthToken: false, }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -948,14 +647,14 @@ func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) { } func TestLoadConfigFile_AgentCache_Persist(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-cache-persist-false.hcl") + config, err := LoadConfig("./test-fixtures/config-cache-persist-false.hcl") if err != nil { t.Fatalf("err: %s", err) } expected := &Config{ Cache: &Cache{ - Persist: &agentproxyshared.PersistConfig{ + Persist: &Persist{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: false, @@ -973,6 +672,11 @@ func TestLoadConfigFile_AgentCache_Persist(t *testing.T) { }, }, }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -982,7 +686,7 @@ func TestLoadConfigFile_AgentCache_Persist(t *testing.T) { } func TestLoadConfigFile_AgentCache_PersistMissingType(t *testing.T) { - _, err := LoadConfigFile("./test-fixtures/config-cache-persist-empty-type.hcl") + _, err := LoadConfig("./test-fixtures/config-cache-persist-empty-type.hcl") if err == nil || os.IsNotExist(err) { t.Fatal("expected error or file is missing") } @@ -1010,7 +714,7 @@ func TestLoadConfigFile_TemplateConfig(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - config, err := LoadConfigFile(tc.fixturePath) + config, err := LoadConfig(tc.fixturePath) if err != nil { t.Fatal(err) } @@ -1109,7 +813,7 @@ func TestLoadConfigFile_Template(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - config, err := LoadConfigFile(tc.fixturePath) + config, err := 
LoadConfig(tc.fixturePath) if err != nil { t.Fatalf("err: %s", err) } @@ -1139,6 +843,11 @@ func TestLoadConfigFile_Template(t *testing.T) { }, }, }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, Templates: tc.expectedTemplates, } @@ -1215,7 +924,7 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - config, err := LoadConfigFile(tc.fixturePath) + config, err := LoadConfig(tc.fixturePath) if err != nil { t.Fatalf("err: %s", err) } @@ -1236,6 +945,11 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { Sinks: nil, }, Templates: tc.expectedTemplates, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, + }, + }, } config.Prune() @@ -1246,45 +960,8 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { } } -// TestLoadConfigFile_Template_WithCache tests ensures that cache {} stanza is -// permitted in vault agent configuration with template(s) -func TestLoadConfigFile_Template_WithCache(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-template-with-cache.hcl") - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - PidFile: "./pidfile", - }, - AutoAuth: &AutoAuth{ - Method: &Method{ - Type: "aws", - MountPath: "auth/aws", - Namespace: "my-namespace/", - Config: map[string]interface{}{ - "role": "foobar", - }, - }, - }, - Cache: &Cache{}, - Templates: []*ctconfig.TemplateConfig{ - { - Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), - Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), - }, - }, - } - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } -} - func TestLoadConfigFile_Vault_Retry(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-vault-retry.hcl") + config, err := LoadConfig("./test-fixtures/config-vault-retry.hcl") if err != nil { t.Fatal(err) } @@ -1329,7 
+1006,7 @@ func TestLoadConfigFile_Vault_Retry(t *testing.T) { } func TestLoadConfigFile_Vault_Retry_Empty(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-vault-retry-empty.hcl") + config, err := LoadConfig("./test-fixtures/config-vault-retry-empty.hcl") if err != nil { t.Fatal(err) } @@ -1374,7 +1051,7 @@ func TestLoadConfigFile_Vault_Retry_Empty(t *testing.T) { } func TestLoadConfigFile_EnforceConsistency(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-consistency.hcl") + config, err := LoadConfig("./test-fixtures/config-consistency.hcl") if err != nil { t.Fatal(err) } @@ -1394,34 +1071,10 @@ func TestLoadConfigFile_EnforceConsistency(t *testing.T) { EnforceConsistency: "always", WhenInconsistent: "retry", }, - } - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } -} - -func TestLoadConfigFile_EnforceConsistency_APIProxy(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-consistency-apiproxy.hcl") - if err != nil { - t.Fatal(err) - } - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - Listeners: []*configutil.Listener{ - { - Type: "tcp", - Address: "127.0.0.1:8300", - TLSDisable: true, - }, + Vault: &Vault{ + Retry: &Retry{ + NumRetries: 12, }, - PidFile: "", - }, - APIProxy: &APIProxy{ - EnforceConsistency: "always", - WhenInconsistent: "retry", }, } @@ -1432,7 +1085,7 @@ func TestLoadConfigFile_EnforceConsistency_APIProxy(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_All(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-all.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-all.hcl") if err != nil { t.Fatal(err) } @@ -1441,8 +1094,8 @@ func TestLoadConfigFile_Disable_Idle_Conns_All(t *testing.T) { SharedConfig: &configutil.SharedConfig{ PidFile: "./pidfile", }, - DisableIdleConns: []string{"auto-auth", "caching", "templating", "proxying"}, - 
DisableIdleConnsAPIProxy: true, + DisableIdleConns: []string{"auto-auth", "caching", "templating"}, + DisableIdleConnsCaching: true, DisableIdleConnsAutoAuth: true, DisableIdleConnsTemplating: true, AutoAuth: &AutoAuth{ @@ -1481,7 +1134,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_All(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_Auto_Auth(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-auto-auth.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-auto-auth.hcl") if err != nil { t.Fatal(err) } @@ -1491,7 +1144,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Auto_Auth(t *testing.T) { PidFile: "./pidfile", }, DisableIdleConns: []string{"auto-auth"}, - DisableIdleConnsAPIProxy: false, + DisableIdleConnsCaching: false, DisableIdleConnsAutoAuth: true, DisableIdleConnsTemplating: false, AutoAuth: &AutoAuth{ @@ -1530,7 +1183,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Auto_Auth(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_Templating(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-templating.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-templating.hcl") if err != nil { t.Fatal(err) } @@ -1540,7 +1193,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Templating(t *testing.T) { PidFile: "./pidfile", }, DisableIdleConns: []string{"templating"}, - DisableIdleConnsAPIProxy: false, + DisableIdleConnsCaching: false, DisableIdleConnsAutoAuth: false, DisableIdleConnsTemplating: true, AutoAuth: &AutoAuth{ @@ -1579,7 +1232,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Templating(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_Caching(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-caching.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-caching.hcl") if err != nil { t.Fatal(err) } @@ 
-1589,56 +1242,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Caching(t *testing.T) { PidFile: "./pidfile", }, DisableIdleConns: []string{"caching"}, - DisableIdleConnsAPIProxy: true, - DisableIdleConnsAutoAuth: false, - DisableIdleConnsTemplating: false, - AutoAuth: &AutoAuth{ - Method: &Method{ - Type: "aws", - MountPath: "auth/aws", - Namespace: "my-namespace/", - Config: map[string]interface{}{ - "role": "foobar", - }, - }, - Sinks: []*Sink{ - { - Type: "file", - DHType: "curve25519", - DHPath: "/tmp/file-foo-dhpath", - AAD: "foobar", - Config: map[string]interface{}{ - "path": "/tmp/file-foo", - }, - }, - }, - }, - Vault: &Vault{ - Address: "http://127.0.0.1:1111", - Retry: &Retry{ - ctconfig.DefaultRetryAttempts, - }, - }, - } - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } -} - -func TestLoadConfigFile_Disable_Idle_Conns_Proxying(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-proxying.hcl") - if err != nil { - t.Fatal(err) - } - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - PidFile: "./pidfile", - }, - DisableIdleConns: []string{"proxying"}, - DisableIdleConnsAPIProxy: true, + DisableIdleConnsCaching: true, DisableIdleConnsAutoAuth: false, DisableIdleConnsTemplating: false, AutoAuth: &AutoAuth{ @@ -1677,7 +1281,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Proxying(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_Empty(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-empty.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-empty.hcl") if err != nil { t.Fatal(err) } @@ -1687,7 +1291,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Empty(t *testing.T) { PidFile: "./pidfile", }, DisableIdleConns: []string{}, - DisableIdleConnsAPIProxy: false, + DisableIdleConnsCaching: false, DisableIdleConnsAutoAuth: false, DisableIdleConnsTemplating: false, AutoAuth: 
&AutoAuth{ @@ -1732,7 +1336,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Env(t *testing.T) { if err != nil { t.Fatal(err) } - config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-empty.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-empty.hcl") if err != nil { t.Fatal(err) } @@ -1742,7 +1346,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Env(t *testing.T) { PidFile: "./pidfile", }, DisableIdleConns: []string{"auto-auth", "caching", "templating"}, - DisableIdleConnsAPIProxy: true, + DisableIdleConnsCaching: true, DisableIdleConnsAutoAuth: true, DisableIdleConnsTemplating: true, AutoAuth: &AutoAuth{ @@ -1781,14 +1385,14 @@ func TestLoadConfigFile_Disable_Idle_Conns_Env(t *testing.T) { } func TestLoadConfigFile_Bad_Value_Disable_Idle_Conns(t *testing.T) { - _, err := LoadConfigFile("./test-fixtures/bad-config-disable-idle-connections.hcl") + _, err := LoadConfig("./test-fixtures/bad-config-disable-idle-connections.hcl") if err == nil { t.Fatal("should have error, it didn't") } } func TestLoadConfigFile_Disable_Keep_Alives_All(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-all.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-all.hcl") if err != nil { t.Fatal(err) } @@ -1797,8 +1401,8 @@ func TestLoadConfigFile_Disable_Keep_Alives_All(t *testing.T) { SharedConfig: &configutil.SharedConfig{ PidFile: "./pidfile", }, - DisableKeepAlives: []string{"auto-auth", "caching", "templating", "proxying"}, - DisableKeepAlivesAPIProxy: true, + DisableKeepAlives: []string{"auto-auth", "caching", "templating"}, + DisableKeepAlivesCaching: true, DisableKeepAlivesAutoAuth: true, DisableKeepAlivesTemplating: true, AutoAuth: &AutoAuth{ @@ -1837,7 +1441,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_All(t *testing.T) { } func TestLoadConfigFile_Disable_Keep_Alives_Auto_Auth(t *testing.T) { - config, err := 
LoadConfigFile("./test-fixtures/config-disable-keep-alives-auto-auth.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-auto-auth.hcl") if err != nil { t.Fatal(err) } @@ -1847,7 +1451,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Auto_Auth(t *testing.T) { PidFile: "./pidfile", }, DisableKeepAlives: []string{"auto-auth"}, - DisableKeepAlivesAPIProxy: false, + DisableKeepAlivesCaching: false, DisableKeepAlivesAutoAuth: true, DisableKeepAlivesTemplating: false, AutoAuth: &AutoAuth{ @@ -1886,7 +1490,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Auto_Auth(t *testing.T) { } func TestLoadConfigFile_Disable_Keep_Alives_Templating(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-templating.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-templating.hcl") if err != nil { t.Fatal(err) } @@ -1896,7 +1500,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Templating(t *testing.T) { PidFile: "./pidfile", }, DisableKeepAlives: []string{"templating"}, - DisableKeepAlivesAPIProxy: false, + DisableKeepAlivesCaching: false, DisableKeepAlivesAutoAuth: false, DisableKeepAlivesTemplating: true, AutoAuth: &AutoAuth{ @@ -1935,7 +1539,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Templating(t *testing.T) { } func TestLoadConfigFile_Disable_Keep_Alives_Caching(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-caching.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-caching.hcl") if err != nil { t.Fatal(err) } @@ -1945,56 +1549,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Caching(t *testing.T) { PidFile: "./pidfile", }, DisableKeepAlives: []string{"caching"}, - DisableKeepAlivesAPIProxy: true, - DisableKeepAlivesAutoAuth: false, - DisableKeepAlivesTemplating: false, - AutoAuth: &AutoAuth{ - Method: &Method{ - Type: "aws", - MountPath: "auth/aws", - Namespace: "my-namespace/", - Config: map[string]interface{}{ - 
"role": "foobar", - }, - }, - Sinks: []*Sink{ - { - Type: "file", - DHType: "curve25519", - DHPath: "/tmp/file-foo-dhpath", - AAD: "foobar", - Config: map[string]interface{}{ - "path": "/tmp/file-foo", - }, - }, - }, - }, - Vault: &Vault{ - Address: "http://127.0.0.1:1111", - Retry: &Retry{ - ctconfig.DefaultRetryAttempts, - }, - }, - } - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } -} - -func TestLoadConfigFile_Disable_Keep_Alives_Proxying(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-proxying.hcl") - if err != nil { - t.Fatal(err) - } - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - PidFile: "./pidfile", - }, - DisableKeepAlives: []string{"proxying"}, - DisableKeepAlivesAPIProxy: true, + DisableKeepAlivesCaching: true, DisableKeepAlivesAutoAuth: false, DisableKeepAlivesTemplating: false, AutoAuth: &AutoAuth{ @@ -2033,7 +1588,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Proxying(t *testing.T) { } func TestLoadConfigFile_Disable_Keep_Alives_Empty(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-empty.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-empty.hcl") if err != nil { t.Fatal(err) } @@ -2043,7 +1598,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Empty(t *testing.T) { PidFile: "./pidfile", }, DisableKeepAlives: []string{}, - DisableKeepAlivesAPIProxy: false, + DisableKeepAlivesCaching: false, DisableKeepAlivesAutoAuth: false, DisableKeepAlivesTemplating: false, AutoAuth: &AutoAuth{ @@ -2088,7 +1643,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Env(t *testing.T) { if err != nil { t.Fatal(err) } - config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-empty.hcl") + config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-empty.hcl") if err != nil { t.Fatal(err) } @@ -2098,7 +1653,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Env(t 
*testing.T) { PidFile: "./pidfile", }, DisableKeepAlives: []string{"auto-auth", "caching", "templating"}, - DisableKeepAlivesAPIProxy: true, + DisableKeepAlivesCaching: true, DisableKeepAlivesAutoAuth: true, DisableKeepAlivesTemplating: true, AutoAuth: &AutoAuth{ @@ -2137,194 +1692,8 @@ func TestLoadConfigFile_Disable_Keep_Alives_Env(t *testing.T) { } func TestLoadConfigFile_Bad_Value_Disable_Keep_Alives(t *testing.T) { - _, err := LoadConfigFile("./test-fixtures/bad-config-disable-keep-alives.hcl") + _, err := LoadConfig("./test-fixtures/bad-config-disable-keep-alives.hcl") if err == nil { t.Fatal("should have error, it didn't") } } - -// TestLoadConfigFile_EnvTemplates_Simple loads and validates an env_template config -func TestLoadConfigFile_EnvTemplates_Simple(t *testing.T) { - cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-simple.hcl") - if err != nil { - t.Fatalf("error loading config file: %s", err) - } - - if err := cfg.ValidateConfig(); err != nil { - t.Fatalf("validation error: %s", err) - } - - expectedKey := "MY_DATABASE_USER" - found := false - for _, envTemplate := range cfg.EnvTemplates { - if *envTemplate.MapToEnvironmentVariable == expectedKey { - found = true - } - } - if !found { - t.Fatalf("expected environment variable name to be populated") - } -} - -// TestLoadConfigFile_EnvTemplates_Complex loads and validates an env_template config -func TestLoadConfigFile_EnvTemplates_Complex(t *testing.T) { - cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-complex.hcl") - if err != nil { - t.Fatalf("error loading config file: %s", err) - } - - if err := cfg.ValidateConfig(); err != nil { - t.Fatalf("validation error: %s", err) - } - - expectedKeys := []string{ - "FOO_PASSWORD", - "FOO_USER", - } - - envExists := func(key string) bool { - for _, envTmpl := range cfg.EnvTemplates { - if *envTmpl.MapToEnvironmentVariable == key { - return true - } - } - return false - } - - for _, expected := range expectedKeys { - if 
!envExists(expected) { - t.Fatalf("expected environment variable %s", expected) - } - } -} - -// TestLoadConfigFile_EnvTemplates_WithSource loads and validates an -// env_template config with "source" instead of "contents" -func TestLoadConfigFile_EnvTemplates_WithSource(t *testing.T) { - cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-with-source.hcl") - if err != nil { - t.Fatalf("error loading config file: %s", err) - } - - if err := cfg.ValidateConfig(); err != nil { - t.Fatalf("validation error: %s", err) - } -} - -// TestLoadConfigFile_EnvTemplates_NoName ensures that env_template with no name triggers an error -func TestLoadConfigFile_EnvTemplates_NoName(t *testing.T) { - _, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-no-name.hcl") - if err == nil { - t.Fatalf("expected error") - } -} - -// TestLoadConfigFile_EnvTemplates_ExecInvalidSignal ensures that an invalid signal triggers an error -func TestLoadConfigFile_EnvTemplates_ExecInvalidSignal(t *testing.T) { - _, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-invalid-signal.hcl") - if err == nil { - t.Fatalf("expected error") - } -} - -// TestLoadConfigFile_EnvTemplates_ExecSimple validates the exec section with default parameters -func TestLoadConfigFile_EnvTemplates_ExecSimple(t *testing.T) { - cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-simple.hcl") - if err != nil { - t.Fatalf("error loading config file: %s", err) - } - - if err := cfg.ValidateConfig(); err != nil { - t.Fatalf("validation error: %s", err) - } - - expectedCmd := []string{"/path/to/my/app", "arg1", "arg2"} - if !slices.Equal(cfg.Exec.Command, expectedCmd) { - t.Fatal("exec.command does not have expected value") - } - - // check defaults - if cfg.Exec.RestartOnSecretChanges != "always" { - t.Fatalf("expected cfg.Exec.RestartOnSecretChanges to be 'always', got '%s'", cfg.Exec.RestartOnSecretChanges) - } - - if cfg.Exec.RestartStopSignal != syscall.SIGTERM { - 
t.Fatalf("expected cfg.Exec.RestartStopSignal to be 'syscall.SIGTERM', got '%s'", cfg.Exec.RestartStopSignal) - } -} - -// TestLoadConfigFile_EnvTemplates_ExecComplex validates the exec section with non-default parameters -func TestLoadConfigFile_EnvTemplates_ExecComplex(t *testing.T) { - cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-complex.hcl") - if err != nil { - t.Fatalf("error loading config file: %s", err) - } - - if err := cfg.ValidateConfig(); err != nil { - t.Fatalf("validation error: %s", err) - } - - if !slices.Equal(cfg.Exec.Command, []string{"env"}) { - t.Fatal("exec.command does not have expected value") - } - - if cfg.Exec.RestartOnSecretChanges != "never" { - t.Fatalf("expected cfg.Exec.RestartOnSecretChanges to be 'never', got %q", cfg.Exec.RestartOnSecretChanges) - } - - if cfg.Exec.RestartStopSignal != syscall.SIGINT { - t.Fatalf("expected cfg.Exec.RestartStopSignal to be 'syscall.SIGINT', got %q", cfg.Exec.RestartStopSignal) - } -} - -// TestLoadConfigFile_Bad_EnvTemplates_MissingExec ensures that ValidateConfig -// errors when "env_template" stanza(s) are specified but "exec" is missing -func TestLoadConfigFile_Bad_EnvTemplates_MissingExec(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-missing-exec.hcl") - if err != nil { - t.Fatalf("error loading config file: %s", err) - } - - if err := config.ValidateConfig(); err == nil { - t.Fatal("expected an error from ValidateConfig: exec section is missing") - } -} - -// TestLoadConfigFile_Bad_EnvTemplates_WithProxy ensures that ValidateConfig -// errors when both env_template and api_proxy stanzas are present -func TestLoadConfigFile_Bad_EnvTemplates_WithProxy(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-with-proxy.hcl") - if err != nil { - t.Fatalf("error loading config file: %s", err) - } - - if err := config.ValidateConfig(); err == nil { - t.Fatal("expected an error from ValidateConfig: 
listener / api_proxy are not compatible with env_template") - } -} - -// TestLoadConfigFile_Bad_EnvTemplates_WithFileTemplates ensures that -// ValidateConfig errors when both env_template and template stanzas are present -func TestLoadConfigFile_Bad_EnvTemplates_WithFileTemplates(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-with-file-templates.hcl") - if err != nil { - t.Fatalf("error loading config file: %s", err) - } - - if err := config.ValidateConfig(); err == nil { - t.Fatal("expected an error from ValidateConfig: file template stanza is not compatible with env_template") - } -} - -// TestLoadConfigFile_Bad_EnvTemplates_DisalowedFields ensure that -// ValidateConfig errors for disalowed env_template fields -func TestLoadConfigFile_Bad_EnvTemplates_DisalowedFields(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-disalowed-fields.hcl") - if err != nil { - t.Fatalf("error loading config file: %s", err) - } - - if err := config.ValidateConfig(); err == nil { - t.Fatal("expected an error from ValidateConfig: disallowed fields specified in env_template") - } -} diff --git a/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl b/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl deleted file mode 100644 index 7d2bf5c2d3a96..0000000000000 --- a/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" - -auto_auth { - method { - type = "aws" - config = { - role = "foobar" - } - } -} - -cache { - use_auto_auth_token = true -} - -api_proxy { - use_auto_auth_token = "force" -} - -listener "tcp" { - address = "127.0.0.1:8300" - tls_disable = true -} diff --git a/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl b/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl index d3d5d426695ae..93e31aad4d737 100644 --- a/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl +++ b/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl b/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl index 5c2b3fb79a792..9a491fa4efc25 100644 --- a/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl +++ b/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl b/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl index 8a39837fa721b..5821c1b59f839 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl b/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl index d1cae75129707..e2c8b328eb02d 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl b/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl index 38b9c2c12207b..5029b8d376416 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl b/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl index 9112183ea8427..9d8110e2d67c6 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl b/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl index 34c292e74f9bb..c13c82520ee6b 100644 --- a/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl +++ b/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_idle_connections = ["foo","caching","templating"] diff --git a/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl b/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl index 087e2ffa010d2..3f1b9f0a198ef 100644 --- a/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl +++ b/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_keep_alives = ["foo","caching","templating"] diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl deleted file mode 100644 index 22ad96c5cf930..0000000000000 --- a/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl +++ /dev/null @@ -1,33 +0,0 @@ -auto_auth { - - method { - type = "token_file" - - config { - token_file_path = "/Users/avean/.vault-token" - } - } -} - -template_config { - static_secret_render_interval = "5m" - exit_on_retry_failure = true -} - -vault { - address = "http://localhost:8200" -} - -env_template "FOO_PASSWORD" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" - - # Error: destination and create_dest_dirs are not allowed in env_template - destination = "/path/on/disk/where/template/will/render.txt" - create_dest_dirs = true -} - -exec { - command = ["./my-app", "arg1", "arg2"] - restart_on_secret_changes = "always" - restart_stop_signal = "SIGTERM" -} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl deleted file mode 100644 index e8d822d64b0d4..0000000000000 --- a/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl +++ /dev/null 
@@ -1,26 +0,0 @@ -auto_auth { - - method { - type = "token_file" - - config { - token_file_path = "/home/username/.vault-token" - } - } -} - -vault { - address = "http://localhost:8200" -} - -env_template "FOO" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.lock }}{{ end }}" - error_on_missing_key = false -} - - -exec { - command = ["env"] - restart_on_secret_changes = "never" - restart_stop_signal = "notasignal" -} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl deleted file mode 100644 index 6283e56a9a467..0000000000000 --- a/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl +++ /dev/null @@ -1,30 +0,0 @@ -auto_auth { - - method { - type = "token_file" - - config { - token_file_path = "/Users/avean/.vault-token" - } - } -} - -template_config { - static_secret_render_interval = "5m" - exit_on_retry_failure = true -} - -vault { - address = "http://localhost:8200" -} - -env_template "FOO_PASSWORD" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" - error_on_missing_key = false -} -env_template "FOO_USER" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" - error_on_missing_key = false -} - -# Error: missing a required "exec" section! 
diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl deleted file mode 100644 index f77f20c11227e..0000000000000 --- a/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl +++ /dev/null @@ -1,26 +0,0 @@ -auto_auth { - - method { - type = "token_file" - - config { - token_file_path = "/home/username/.vault-token" - } - } -} - -vault { - address = "http://localhost:8200" -} - -env_template { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.lock }}{{ end }}" - error_on_missing_key = false -} - - -exec { - command = ["env"] - restart_on_secret_changes = "never" - restart_stop_signal = "SIGTERM" -} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl deleted file mode 100644 index 811b10d5208f2..0000000000000 --- a/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl +++ /dev/null @@ -1,40 +0,0 @@ -auto_auth { - - method { - type = "token_file" - - config { - token_file_path = "/Users/avean/.vault-token" - } - } -} - -template_config { - static_secret_render_interval = "5m" - exit_on_retry_failure = true -} - -vault { - address = "http://localhost:8200" -} - -# Error: template is incompatible with env_template! 
-template { - source = "/path/on/disk/to/template.ctmpl" - destination = "/path/on/disk/where/template/will/render.txt" -} - -env_template "FOO_PASSWORD" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" - error_on_missing_key = false -} -env_template "FOO_USER" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" - error_on_missing_key = false -} - -exec { - command = ["./my-app", "arg1", "arg2"] - restart_on_secret_changes = "always" - restart_stop_signal = "SIGTERM" -} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl deleted file mode 100644 index 3c6095ddeae82..0000000000000 --- a/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl +++ /dev/null @@ -1,47 +0,0 @@ -auto_auth { - - method { - type = "token_file" - - config { - token_file_path = "/Users/avean/.vault-token" - } - } -} - -template_config { - static_secret_render_interval = "5m" - exit_on_retry_failure = true -} - -vault { - address = "http://localhost:8200" -} - -env_template "FOO_PASSWORD" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" - error_on_missing_key = false -} -env_template "FOO_USER" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" - error_on_missing_key = false -} - -exec { - command = ["./my-app", "arg1", "arg2"] - restart_on_secret_changes = "always" - restart_stop_signal = "SIGTERM" -} - -# Error: api_proxy is incompatible with env_template -api_proxy { - use_auto_auth_token = "force" - enforce_consistency = "always" - when_inconsistent = "forward" -} - -# Error: listener is incompatible with env_template -listener "tcp" { - address = "127.0.0.1:8300" - tls_disable = true -} diff --git a/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl 
b/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl index cb9696dfb3f16..7a375737161a8 100644 --- a/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl +++ b/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl b/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl deleted file mode 100644 index a3e4e5b99a2a9..0000000000000 --- a/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" - -auto_auth { - method { - type = "aws" - config = { - role = "foobar" - } - } -} - -api_proxy { - use_auto_auth_token = "force" - enforce_consistency = "always" - when_inconsistent = "forward" -} - -listener "tcp" { - address = "127.0.0.1:8300" - tls_disable = true -} diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl index 252216e21d7fc..1a2fd91d6c70e 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl index 429645527a15a..9aad89cdd2aa4 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl index 80486b346b1c2..b3dc1383f6762 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl index cebcdfbc58682..ccadc501d07c9 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { @@ -14,10 +11,10 @@ auto_auth { cache { use_auto_auth_token = "true" - force_auto_auth_token = false } listener "tcp" { address = "127.0.0.1:8300" tls_disable = true } + diff --git a/command/agent/config/test-fixtures/config-cache-embedded-type.hcl b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl index 4ea525753b6c8..3c56153157cac 100644 --- a/command/agent/config/test-fixtures/config-cache-embedded-type.hcl +++ b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { @@ -49,14 +46,6 @@ listener { listener { type = "tcp" - address = "127.0.0.1:3000" - tls_disable = true - role = "metrics_only" -} - -listener { - type = "tcp" - role = "default" address = "127.0.0.1:8400" tls_key_file = "/path/to/cakey.pem" tls_cert_file = "/path/to/cacert.pem" diff --git a/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl b/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl index 45c71412bdd5e..7a2a57f683aec 100644 --- a/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl +++ b/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/config-cache-no-listeners.hcl b/command/agent/config/test-fixtures/config-cache-no-listeners.hcl index 3e0abfb6b9eb7..d7176e0aa539a 100644 --- a/command/agent/config/test-fixtures/config-cache-no-listeners.hcl +++ b/command/agent/config/test-fixtures/config-cache-no-listeners.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl b/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl index f40715e6e418f..55f1d64801612 100644 --- a/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl +++ b/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/config-cache-persist-false.hcl b/command/agent/config/test-fixtures/config-cache-persist-false.hcl index 77bb926cffdfc..5ab7f04499118 100644 --- a/command/agent/config/test-fixtures/config-cache-persist-false.hcl +++ b/command/agent/config/test-fixtures/config-cache-persist-false.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/config-cache.hcl b/command/agent/config/test-fixtures/config-cache.hcl index 87fa5afad8618..b468e9a077b28 100644 --- a/command/agent/config/test-fixtures/config-cache.hcl +++ b/command/agent/config/test-fixtures/config-cache.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { @@ -46,15 +43,7 @@ listener "tcp" { tls_disable = true } -listener { - type = "tcp" - address = "127.0.0.1:3000" - tls_disable = true - role = "metrics_only" -} - listener "tcp" { - role = "default" address = "127.0.0.1:8400" tls_key_file = "/path/to/cakey.pem" tls_cert_file = "/path/to/cacert.pem" diff --git a/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl b/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl deleted file mode 100644 index c2e662a67206a..0000000000000 --- a/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -api_proxy { - enforce_consistency = "always" - when_inconsistent = "retry" -} - -listener "tcp" { - address = "127.0.0.1:8300" - tls_disable = true -} diff --git a/command/agent/config/test-fixtures/config-consistency.hcl b/command/agent/config/test-fixtures/config-consistency.hcl index 535181197c6c1..d57e0557362e1 100644 --- a/command/agent/config/test-fixtures/config-consistency.hcl +++ b/command/agent/config/test-fixtures/config-consistency.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - cache { enforce_consistency = "always" when_inconsistent = "retry" diff --git a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl deleted file mode 100644 index c900df6b1aa3f..0000000000000 --- a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" - -auto_auth { - method { - type = "aws" - config = { - role = "foobar" - } - } - - sink { - type = "file" - config = { - path = "/tmp/file-foo" - } - aad = "foobar" - dh_type = "curve25519" - dh_path = "/tmp/file-foo-dhpath" - } -} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl deleted file mode 100644 index 2e942da663818..0000000000000 --- a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" - -listener "tcp" { - address = "127.0.0.1:8300" - tls_disable = true -} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl b/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl deleted file mode 100644 index 767cdd9e4a5f2..0000000000000 --- a/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" - -auto_auth { - method { - type = "aws" - config = { - role = "foobar" - } - } - - sink { - type = "file" - config = { - path = "/tmp/file-foo" - } - aad = "foobar" - dh_type = "curve25519" - dh_path = "/tmp/file-foo-dhpath" - } -} - -listener "unix" { - address = "/path/to/socket" - tls_disable = true - socket_mode = "configmode" - socket_user = "configuser" - socket_group = "configgroup" -} - -listener "tcp" { - address = "127.0.0.1:8300" - tls_disable = true -} - -listener { - type = "tcp" - address = "127.0.0.1:3000" - tls_disable = true - role = "metrics_only" -} - -listener "tcp" { - role = "default" - address = "127.0.0.1:8400" - tls_key_file = "/path/to/cakey.pem" - tls_cert_file = "/path/to/cacert.pem" -} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl b/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl deleted file mode 100644 index f4d0f47f96120..0000000000000 --- a/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -cache { - use_auto_auth_token = true - persist = { - type = "kubernetes" - path = "/vault/agent-cache/" - keep_after_import = true - exit_on_err = true - service_account_token_file = "/tmp/serviceaccount/token" - } -} - -vault { - address = "http://127.0.0.1:1111" - ca_cert = "config_ca_cert" - ca_path = "config_ca_path" - tls_skip_verify = "true" - client_cert = "config_client_cert" - client_key = "config_client_key" -} diff --git a/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl b/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl deleted file mode 100644 index 18729533f4414..0000000000000 --- a/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -vault { - address = "http://127.0.0.1:1111" - ca_cert = "config_ca_cert" - ca_path = "config_ca_path" - tls_skip_verify = "true" - client_cert = "config_client_cert" - client_key = "config_client_key" -} diff --git a/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl b/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl deleted file mode 100644 index c900df6b1aa3f..0000000000000 --- a/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" - -auto_auth { - method { - type = "aws" - config = { - role = "foobar" - } - } - - sink { - type = "file" - config = { - path = "/tmp/file-foo" - } - aad = "foobar" - dh_type = "curve25519" - dh_path = "/tmp/file-foo-dhpath" - } -} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl index f312d420c8e78..69ff548f55614 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl @@ -1,8 +1,5 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" -disable_idle_connections = ["auto-auth","caching","templating","proxying"] +disable_idle_connections = ["auto-auth","caching","templating"] auto_auth { method { diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl index abb1756697fa3..1a63b20480d4f 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_idle_connections = ["auto-auth"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl index 95a36e925a58b..30d0806c03371 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_idle_connections = ["caching"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl index 3e490bfbc4c2f..eb95310cedfff 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_idle_connections = [] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl deleted file mode 100644 index 88da2effcc718..0000000000000 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" -disable_idle_connections = ["proxying"] - -auto_auth { - method { - type = "aws" - namespace = "my-namespace/" - - config = { - role = "foobar" - } - } - - sink { - type = "file" - config = { - path = "/tmp/file-foo" - } - aad = "foobar" - dh_type = "curve25519" - dh_path = "/tmp/file-foo-dhpath" - } -} - -vault { - address = "http://127.0.0.1:1111" -} diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl index 6e51c91a0e4b4..922377fc82a96 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_idle_connections = ["templating"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl index 8c1c6d58282ab..9b22cfd8be67d 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl @@ -1,8 +1,5 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" -disable_keep_alives = ["auto-auth","caching","templating","proxying"] +disable_keep_alives = ["auto-auth","caching","templating"] auto_auth { method { diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl index d77dfb2783785..11393bfb57a60 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_keep_alives = ["auto-auth"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl index 386267e3e8014..5712296924edf 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_keep_alives = ["caching"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl index b4239a5261ed5..8cddcebd8f106 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_keep_alives = [] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl deleted file mode 100644 index 8c82a92b6e157..0000000000000 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" -disable_keep_alives = ["proxying"] - -auto_auth { - method { - type = "aws" - namespace = "my-namespace/" - - config = { - role = "foobar" - } - } - - sink { - type = "file" - config = { - path = "/tmp/file-foo" - } - aad = "foobar" - dh_type = "curve25519" - dh_path = "/tmp/file-foo-dhpath" - } -} - -vault { - address = "http://127.0.0.1:1111" -} diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl index 01ec09504544d..d4731cbd90e2e 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" disable_keep_alives = ["templating"] diff --git a/command/agent/config/test-fixtures/config-embedded-type.hcl b/command/agent/config/test-fixtures/config-embedded-type.hcl index 2ce3b401ca7ed..919bfd907757a 100644 --- a/command/agent/config/test-fixtures/config-embedded-type.hcl +++ b/command/agent/config/test-fixtures/config-embedded-type.hcl @@ -1,8 +1,4 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" -log_file = "/var/log/vault/vault-agent.log" auto_auth { method "aws" { diff --git a/command/agent/config/test-fixtures/config-env-templates-complex.hcl b/command/agent/config/test-fixtures/config-env-templates-complex.hcl deleted file mode 100644 index 639b1288f24c0..0000000000000 --- a/command/agent/config/test-fixtures/config-env-templates-complex.hcl +++ /dev/null @@ -1,36 +0,0 @@ -auto_auth { - - method { - type = "token_file" - - config { - token_file_path = "/home/username/.vault-token" - } - } -} - -cache {} - -template_config { - static_secret_render_interval = "5m" - exit_on_retry_failure = true -} - -vault { - address = "http://localhost:8200" -} - -env_template "FOO_PASSWORD" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" - error_on_missing_key = false -} -env_template "FOO_USER" { - contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" - error_on_missing_key = false -} - -exec { - command = ["env"] - restart_on_secret_changes = "never" - restart_stop_signal = "SIGINT" -} diff --git a/command/agent/config/test-fixtures/config-env-templates-simple.hcl b/command/agent/config/test-fixtures/config-env-templates-simple.hcl deleted file mode 100644 index 441563b1e9f0a..0000000000000 --- a/command/agent/config/test-fixtures/config-env-templates-simple.hcl +++ /dev/null @@ -1,18 +0,0 @@ -auto_auth { - - method { - type = "token_file" - - config { - token_file_path = "/Users/avean/.vault-token" - } - } -} - -env_template "MY_DATABASE_USER" { - contents = "{{ with secret \"secret/db-secret\" }}{{ .Data.data.user }}{{ end }}" -} - -exec { - command = ["/path/to/my/app", "arg1", "arg2"] -} diff --git a/command/agent/config/test-fixtures/config-env-templates-with-source.hcl b/command/agent/config/test-fixtures/config-env-templates-with-source.hcl deleted file mode 100644 index d51cb5553a917..0000000000000 --- 
a/command/agent/config/test-fixtures/config-env-templates-with-source.hcl +++ /dev/null @@ -1,16 +0,0 @@ -auto_auth { - method { - type = "token_file" - config { - token_file_path = "/home/username/.vault-token" - } - } -} - -env_template "MY_PASSWORD" { - source = "/path/on/disk/to/template.ctmpl" -} - -exec { - command = ["/path/to/my/app", "arg1", "arg2"] -} diff --git a/command/agent/config/test-fixtures/config-method-exit-on-err.hcl b/command/agent/config/test-fixtures/config-method-exit-on-err.hcl index bbda08c01b63a..c52140102f1c8 100644 --- a/command/agent/config/test-fixtures/config-method-exit-on-err.hcl +++ b/command/agent/config/test-fixtures/config-method-exit-on-err.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-method-initial-backoff.hcl b/command/agent/config/test-fixtures/config-method-initial-backoff.hcl index b166dabde4183..6b9343aa4ba62 100644 --- a/command/agent/config/test-fixtures/config-method-initial-backoff.hcl +++ b/command/agent/config/test-fixtures/config-method-initial-backoff.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-method-wrapping.hcl b/command/agent/config/test-fixtures/config-method-wrapping.hcl index 8142a19dd80f0..cbafc5a245937 100644 --- a/command/agent/config/test-fixtures/config-method-wrapping.hcl +++ b/command/agent/config/test-fixtures/config-method-wrapping.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-full-nosink.hcl b/command/agent/config/test-fixtures/config-template-full-nosink.hcl index 579aae1e86ca6..84edf6f11e3c7 100644 --- a/command/agent/config/test-fixtures/config-template-full-nosink.hcl +++ b/command/agent/config/test-fixtures/config-template-full-nosink.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-full.hcl b/command/agent/config/test-fixtures/config-template-full.hcl index b7641cd66403f..5e5cbc62cd7b7 100644 --- a/command/agent/config/test-fixtures/config-template-full.hcl +++ b/command/agent/config/test-fixtures/config-template-full.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-many-nosink.hcl b/command/agent/config/test-fixtures/config-template-many-nosink.hcl index 2f8352d1b0008..e04f77da263e2 100644 --- a/command/agent/config/test-fixtures/config-template-many-nosink.hcl +++ b/command/agent/config/test-fixtures/config-template-many-nosink.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-many.hcl b/command/agent/config/test-fixtures/config-template-many.hcl index 3a3ce77f1f462..2f6fe7b70b6d9 100644 --- a/command/agent/config/test-fixtures/config-template-many.hcl +++ b/command/agent/config/test-fixtures/config-template-many.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-min-nosink.hcl b/command/agent/config/test-fixtures/config-template-min-nosink.hcl index 064b7a452f699..12c5a82c5385c 100644 --- a/command/agent/config/test-fixtures/config-template-min-nosink.hcl +++ b/command/agent/config/test-fixtures/config-template-min-nosink.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-min.hcl b/command/agent/config/test-fixtures/config-template-min.hcl index 34435da638c5b..5d37dbefbab1e 100644 --- a/command/agent/config/test-fixtures/config-template-min.hcl +++ b/command/agent/config/test-fixtures/config-template-min.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-with-cache.hcl b/command/agent/config/test-fixtures/config-template-with-cache.hcl deleted file mode 100644 index 8f43b8311eeeb..0000000000000 --- a/command/agent/config/test-fixtures/config-template-with-cache.hcl +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" - -auto_auth { - method { - type = "aws" - namespace = "/my-namespace" - - config = { - role = "foobar" - } - } -} - -cache {} - -template { - source = "/path/on/disk/to/template.ctmpl" - destination = "/path/on/disk/where/template/will/render.txt" -} diff --git a/command/agent/config/test-fixtures/config-template_config-empty.hcl b/command/agent/config/test-fixtures/config-template_config-empty.hcl index ac22dcc5cd6cf..a4f5b3a0938f5 100644 --- a/command/agent/config/test-fixtures/config-template_config-empty.hcl +++ b/command/agent/config/test-fixtures/config-template_config-empty.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - vault { address = "http://127.0.0.1:1111" retry { diff --git a/command/agent/config/test-fixtures/config-template_config.hcl b/command/agent/config/test-fixtures/config-template_config.hcl index b550890018d4a..5da0e2b9127b9 100644 --- a/command/agent/config/test-fixtures/config-template_config.hcl +++ b/command/agent/config/test-fixtures/config-template_config.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - vault { address = "http://127.0.0.1:1111" retry { diff --git a/command/agent/config/test-fixtures/config-vault-retry-empty.hcl b/command/agent/config/test-fixtures/config-vault-retry-empty.hcl index 72c44e1f97bdc..e7ab4aa0017f2 100644 --- a/command/agent/config/test-fixtures/config-vault-retry-empty.hcl +++ b/command/agent/config/test-fixtures/config-vault-retry-empty.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-vault-retry.hcl b/command/agent/config/test-fixtures/config-vault-retry.hcl index 5e4ee234304ec..0c1cfa19b0646 100644 --- a/command/agent/config/test-fixtures/config-vault-retry.hcl +++ b/command/agent/config/test-fixtures/config-vault-retry.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config.hcl b/command/agent/config/test-fixtures/config.hcl index 18ec360309c99..b02170736a8ef 100644 --- a/command/agent/config/test-fixtures/config.hcl +++ b/command/agent/config/test-fixtures/config.hcl @@ -1,8 +1,4 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - pid_file = "./pidfile" -log_file = "/var/log/vault/vault-agent.log" auto_auth { method { diff --git a/command/agent/doc.go b/command/agent/doc.go index e9f0f0b98fe3c..0786f5c1d3949 100644 --- a/command/agent/doc.go +++ b/command/agent/doc.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - /* Package agent implements a daemon mode of Vault designed to provide helper features like auto-auth, caching, and templating. 
diff --git a/command/agent/exec/exec.go b/command/agent/exec/exec.go deleted file mode 100644 index b22e5eb9302af..0000000000000 --- a/command/agent/exec/exec.go +++ /dev/null @@ -1,332 +0,0 @@ -package exec - -import ( - "context" - "fmt" - "io" - "os" - "sort" - "sync" - "time" - - "github.com/hashicorp/consul-template/child" - ctconfig "github.com/hashicorp/consul-template/config" - "github.com/hashicorp/consul-template/manager" - "github.com/hashicorp/go-hclog" - "golang.org/x/exp/slices" - - "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/command/agent/internal/ctmanager" - "github.com/hashicorp/vault/helper/useragent" - "github.com/hashicorp/vault/sdk/helper/pointerutil" -) - -type childProcessState uint8 - -const ( - childProcessStateNotStarted childProcessState = iota - childProcessStateRunning - childProcessStateRestarting - childProcessStateStopped -) - -type ServerConfig struct { - Logger hclog.Logger - AgentConfig *config.Config - - Namespace string - - // LogLevel is needed to set the internal Consul Template Runner's log level - // to match the log level of Vault Agent. The internal Runner creates it's own - // logger and can't be set externally or copied from the Template Server. - // - // LogWriter is needed to initialize Consul Template's internal logger to use - // the same io.Writer that Vault Agent itself is using. - LogLevel hclog.Level - LogWriter io.Writer -} - -type Server struct { - // config holds the ServerConfig used to create it. 
It's passed along in other - // methods - config *ServerConfig - - // runner is the consul-template runner - runner *manager.Runner - - // numberOfTemplates is the count of templates determined by consul-template, - // we keep the value to ensure all templates have been rendered before - // starting the child process - // NOTE: each template may have more than one TemplateConfig, so the numbers may not match up - numberOfTemplates int - - logger hclog.Logger - - childProcess *child.Child - childProcessState childProcessState - childProcessLock sync.Mutex - - // exit channel of the child process - childProcessExitCh chan int - - // lastRenderedEnvVars is the cached value of all environment variables - // rendered by the templating engine; it is used for detecting changes - lastRenderedEnvVars []string -} - -type ProcessExitError struct { - ExitCode int -} - -func (e *ProcessExitError) Error() string { - return fmt.Sprintf("process exited with %d", e.ExitCode) -} - -func NewServer(cfg *ServerConfig) *Server { - server := Server{ - logger: cfg.Logger, - config: cfg, - childProcessState: childProcessStateNotStarted, - childProcessExitCh: make(chan int), - } - - return &server -} - -func (s *Server) Run(ctx context.Context, incomingVaultToken chan string) error { - latestToken := new(string) - s.logger.Info("starting exec server") - defer func() { - s.logger.Info("exec server stopped") - }() - - if len(s.config.AgentConfig.EnvTemplates) == 0 || s.config.AgentConfig.Exec == nil { - s.logger.Info("no env templates or exec config, exiting") - <-ctx.Done() - return nil - } - - managerConfig := ctmanager.ManagerConfig{ - AgentConfig: s.config.AgentConfig, - Namespace: s.config.Namespace, - LogLevel: s.config.LogLevel, - LogWriter: s.config.LogWriter, - } - - runnerConfig, err := ctmanager.NewConfig(managerConfig, s.config.AgentConfig.EnvTemplates) - if err != nil { - return fmt.Errorf("template server failed to generate runner config: %w", err) - } - - // We leave this in 
"dry" mode, as there are no files to render; - // we will get the environment variables rendered contents from the incoming events - s.runner, err = manager.NewRunner(runnerConfig, true) - if err != nil { - return fmt.Errorf("template server failed to create: %w", err) - } - - // prevent the templates from being rendered to stdout in "dry" mode - s.runner.SetOutStream(io.Discard) - - s.numberOfTemplates = len(s.runner.TemplateConfigMapping()) - - // We receive multiple events every staticSecretRenderInterval - // from <-s.runner.TemplateRenderedCh(), one for each secret. Only the last - // event in a batch will contain the latest set of all secrets and the - // corresponding environment variables. This timer will fire after 2 seconds - // unless an event comes in which resets the timer back to 2 seconds. - var debounceTimer *time.Timer - - // capture the errors related to restarting the child process - restartChildProcessErrCh := make(chan error) - - for { - select { - case <-ctx.Done(): - s.runner.Stop() - s.childProcessLock.Lock() - if s.childProcess != nil { - s.childProcess.Stop() - } - s.childProcessState = childProcessStateStopped - s.childProcessLock.Unlock() - return nil - - case token := <-incomingVaultToken: - if token != *latestToken { - s.logger.Info("exec server received new token") - - s.runner.Stop() - *latestToken = token - newTokenConfig := ctconfig.Config{ - Vault: &ctconfig.VaultConfig{ - Token: latestToken, - ClientUserAgent: pointerutil.StringPtr(useragent.AgentTemplatingString()), - }, - } - - // got a new auth token, merge it in with the existing config - runnerConfig = runnerConfig.Merge(&newTokenConfig) - s.runner, err = manager.NewRunner(runnerConfig, true) - if err != nil { - s.logger.Error("template server failed with new Vault token", "error", err) - continue - } - - // prevent the templates from being rendered to stdout in "dry" mode - s.runner.SetOutStream(io.Discard) - - go s.runner.Start() - } - - case err := <-s.runner.ErrCh: - 
s.logger.Error("template server error", "error", err.Error()) - s.runner.StopImmediately() - - // Return after stopping the runner if exit on retry failure was specified - if s.config.AgentConfig.TemplateConfig != nil && s.config.AgentConfig.TemplateConfig.ExitOnRetryFailure { - return fmt.Errorf("template server: %w", err) - } - - s.runner, err = manager.NewRunner(runnerConfig, true) - if err != nil { - return fmt.Errorf("template server failed to create: %w", err) - } - go s.runner.Start() - - case <-s.runner.TemplateRenderedCh(): - // A template has been rendered, figure out what to do - s.logger.Trace("template rendered") - events := s.runner.RenderEvents() - - // This checks if we've finished rendering the initial set of templates, - // for every consecutive re-render len(events) should equal s.numberOfTemplates - if len(events) < s.numberOfTemplates { - // Not all templates have been rendered yet - continue - } - - // assume the renders are finished, until we find otherwise - doneRendering := true - var renderedEnvVars []string - for _, event := range events { - // This template hasn't been rendered - if event.LastWouldRender.IsZero() { - doneRendering = false - break - } else { - for _, tcfg := range event.TemplateConfigs { - envVar := fmt.Sprintf("%s=%s", *tcfg.MapToEnvironmentVariable, event.Contents) - renderedEnvVars = append(renderedEnvVars, envVar) - } - } - } - if !doneRendering { - continue - } - - // sort the environment variables for a deterministic output and easy comparison - sort.Strings(renderedEnvVars) - - s.logger.Trace("done rendering templates") - - // don't restart the process unless a change is detected - if slices.Equal(s.lastRenderedEnvVars, renderedEnvVars) { - continue - } - - s.lastRenderedEnvVars = renderedEnvVars - - s.logger.Debug("detected a change in the environment variables: restarting the child process") - - // if a timer exists, stop it - if debounceTimer != nil { - debounceTimer.Stop() - } - debounceTimer = 
time.AfterFunc(2*time.Second, func() { - if err := s.restartChildProcess(renderedEnvVars); err != nil { - restartChildProcessErrCh <- fmt.Errorf("unable to restart the child process: %w", err) - } - }) - - case err := <-restartChildProcessErrCh: - // catch the error from restarting - return err - - case exitCode := <-s.childProcessExitCh: - // process exited on its own - return &ProcessExitError{ExitCode: exitCode} - } - } -} - -func (s *Server) restartChildProcess(newEnvVars []string) error { - s.childProcessLock.Lock() - defer s.childProcessLock.Unlock() - - switch s.config.AgentConfig.Exec.RestartOnSecretChanges { - case "always": - if s.childProcessState == childProcessStateRunning { - // process is running, need to kill it first - s.logger.Info("stopping process", "process_id", s.childProcess.Pid()) - s.childProcessState = childProcessStateRestarting - s.childProcess.Stop() - } - case "never": - if s.childProcessState == childProcessStateRunning { - s.logger.Info("detected update, but not restarting process", "process_id", s.childProcess.Pid()) - return nil - } - default: - return fmt.Errorf("invalid value for restart-on-secret-changes: %q", s.config.AgentConfig.Exec.RestartOnSecretChanges) - } - - args, subshell, err := child.CommandPrep(s.config.AgentConfig.Exec.Command) - if err != nil { - return fmt.Errorf("unable to parse command: %w", err) - } - - childInput := &child.NewInput{ - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - Command: args[0], - Args: args[1:], - Timeout: 0, // let it run forever - Env: append(os.Environ(), newEnvVars...), - ReloadSignal: nil, // can't reload w/ new env vars - KillSignal: s.config.AgentConfig.Exec.RestartStopSignal, - KillTimeout: 30 * time.Second, - Splay: 0, - Setpgid: subshell, - Logger: s.logger.StandardLogger(nil), - } - - proc, err := child.New(childInput) - if err != nil { - return err - } - s.childProcess = proc - - if err := s.childProcess.Start(); err != nil { - return fmt.Errorf("error starting 
the child process: %w", err) - } - - s.childProcessState = childProcessStateRunning - - // Listen if the child process exits and bubble it up to the main loop. - // - // NOTE: this must be invoked after child.Start() to avoid a potential - // race condition with ExitCh not being initialized. - go func() { - select { - case exitCode, ok := <-proc.ExitCh(): - // ignore ExitCh channel closures caused by our restarts - if ok { - s.childProcessExitCh <- exitCode - } - } - }() - - return nil -} diff --git a/command/agent/exec/exec_test.go b/command/agent/exec/exec_test.go deleted file mode 100644 index 3c13c34a9f516..0000000000000 --- a/command/agent/exec/exec_test.go +++ /dev/null @@ -1,379 +0,0 @@ -package exec - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "path/filepath" - "strconv" - "syscall" - "testing" - "time" - - ctconfig "github.com/hashicorp/consul-template/config" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-retryablehttp" - - "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/helper/pointerutil" -) - -func fakeVaultServer(t *testing.T) *httptest.Server { - t.Helper() - - firstRequest := true - - mux := http.NewServeMux() - mux.HandleFunc("/v1/kv/my-app/creds", func(w http.ResponseWriter, r *http.Request) { - // change the password on the second request to re-render the template - var password string - - if firstRequest { - password = "s3cr3t" - } else { - password = "s3cr3t-two" - } - - firstRequest = false - - fmt.Fprintf(w, `{ - "request_id": "8af096e9-518c-7351-eff5-5ba20554b21f", - "lease_id": "", - "renewable": false, - "lease_duration": 0, - "data": { - "data": { - "password": "%s", - "user": "app-user" - }, - "metadata": { - "created_time": "2019-10-07T22:18:44.233247Z", - "deletion_time": "", - "destroyed": false, - "version": 3 - } - }, - "wrap_info": null, - "warnings": null, - 
"auth": null - }`, - password, - ) - }) - - return httptest.NewServer(mux) -} - -// TestExecServer_Run tests various scenarios of using vault agent as a process -// supervisor. At its core is a sample application referred to as 'test app', -// compiled from ./test-app/main.go. Each test case verifies that the test app -// is started and/or stopped correctly by exec.Server.Run(). There are 3 -// high-level scenarios we want to test for: -// -// 1. test app is started and is injected with environment variables -// 2. test app exits early (either with zero or non-zero extit code) -// 3. test app needs to be stopped (and restarted) by exec.Server -func TestExecServer_Run(t *testing.T) { - // we must build a test-app binary since 'go run' does not propagate signals correctly - goBinary, err := exec.LookPath("go") - if err != nil { - t.Fatalf("could not find go binary on path: %s", err) - } - - testAppBinary := filepath.Join(os.TempDir(), "test-app") - - if err := exec.Command(goBinary, "build", "-o", testAppBinary, "./test-app").Run(); err != nil { - t.Fatalf("could not build the test application: %s", err) - } - defer func() { - if err := os.Remove(testAppBinary); err != nil { - t.Fatalf("could not remove %q test application: %s", testAppBinary, err) - } - }() - - testCases := map[string]struct { - // skip this test case - skip bool - skipReason string - - // inputs to the exec server - envTemplates []*ctconfig.TemplateConfig - staticSecretRenderInterval time.Duration - - // test app parameters - testAppArgs []string - testAppStopSignal os.Signal - testAppPort int - - // simulate a shutdown of agent, which, in turn stops the test app - simulateShutdown bool - simulateShutdownWaitDuration time.Duration - - // expected results - expected map[string]string - expectedTestDuration time.Duration - expectedError error - }{ - "ensure_environment_variables_are_injected": { - skip: true, - envTemplates: []*ctconfig.TemplateConfig{{ - Contents: pointerutil.StringPtr(`{{ with 
secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), - MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), - }, { - Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.password }}{{ end }}`), - MapToEnvironmentVariable: pointerutil.StringPtr("MY_PASSWORD"), - }}, - testAppArgs: []string{"--stop-after", "10s"}, - testAppStopSignal: syscall.SIGTERM, - testAppPort: 34001, - expected: map[string]string{ - "MY_USER": "app-user", - "MY_PASSWORD": "s3cr3t", - }, - expectedTestDuration: 15 * time.Second, - expectedError: nil, - }, - - "password_changes_test_app_should_restart": { - envTemplates: []*ctconfig.TemplateConfig{{ - Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), - MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), - }, { - Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.password }}{{ end }}`), - MapToEnvironmentVariable: pointerutil.StringPtr("MY_PASSWORD"), - }}, - staticSecretRenderInterval: 5 * time.Second, - testAppArgs: []string{"--stop-after", "15s", "--sleep-after-stop-signal", "0s"}, - testAppStopSignal: syscall.SIGTERM, - testAppPort: 34002, - expected: map[string]string{ - "MY_USER": "app-user", - "MY_PASSWORD": "s3cr3t-two", - }, - expectedTestDuration: 15 * time.Second, - expectedError: nil, - }, - - "test_app_exits_early": { - skip: true, - envTemplates: []*ctconfig.TemplateConfig{{ - Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), - MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), - }}, - testAppArgs: []string{"--stop-after", "1s"}, - testAppStopSignal: syscall.SIGTERM, - testAppPort: 34003, - expectedTestDuration: 15 * time.Second, - expectedError: &ProcessExitError{0}, - }, - - "test_app_exits_early_non_zero": { - skip: true, - envTemplates: []*ctconfig.TemplateConfig{{ - Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ 
.Data.data.user }}{{ end }}`), - MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), - }}, - testAppArgs: []string{"--stop-after", "1s", "--exit-code", "5"}, - testAppStopSignal: syscall.SIGTERM, - testAppPort: 34004, - expectedTestDuration: 15 * time.Second, - expectedError: &ProcessExitError{5}, - }, - - "send_sigterm_expect_test_app_exit": { - skip: true, - envTemplates: []*ctconfig.TemplateConfig{{ - Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), - MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), - }}, - testAppArgs: []string{"--stop-after", "30s", "--sleep-after-stop-signal", "1s"}, - testAppStopSignal: syscall.SIGTERM, - testAppPort: 34005, - simulateShutdown: true, - simulateShutdownWaitDuration: 3 * time.Second, - expectedTestDuration: 15 * time.Second, - expectedError: nil, - }, - - "send_sigusr1_expect_test_app_exit": { - skip: true, - envTemplates: []*ctconfig.TemplateConfig{{ - Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), - MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), - }}, - testAppArgs: []string{"--stop-after", "30s", "--sleep-after-stop-signal", "1s", "--use-sigusr1"}, - testAppStopSignal: syscall.SIGUSR1, - testAppPort: 34006, - simulateShutdown: true, - simulateShutdownWaitDuration: 3 * time.Second, - expectedTestDuration: 15 * time.Second, - expectedError: nil, - }, - - "test_app_ignores_stop_signal": { - skip: true, - skipReason: "This test currently fails with 'go test -race' (see hashicorp/consul-template/issues/1753).", - envTemplates: []*ctconfig.TemplateConfig{{ - Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), - MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), - }}, - testAppArgs: []string{"--stop-after", "60s", "--sleep-after-stop-signal", "60s"}, - testAppStopSignal: syscall.SIGTERM, - testAppPort: 34007, - simulateShutdown: true, 
- simulateShutdownWaitDuration: 32 * time.Second, // the test app should be stopped immediately after 30s - expectedTestDuration: 45 * time.Second, - expectedError: nil, - }, - } - - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - if testCase.skip { - t.Skip(testCase.skipReason) - } - - t.Logf("test case %s: begin", name) - defer t.Logf("test case %s: end", name) - - fakeVault := fakeVaultServer(t) - defer fakeVault.Close() - - ctx, cancelContextFunc := context.WithTimeout(context.Background(), testCase.expectedTestDuration) - defer cancelContextFunc() - - testAppCommand := []string{ - testAppBinary, - "--port", - strconv.Itoa(testCase.testAppPort), - } - - execServer := NewServer(&ServerConfig{ - Logger: logging.NewVaultLogger(hclog.Trace), - AgentConfig: &config.Config{ - Vault: &config.Vault{ - Address: fakeVault.URL, - Retry: &config.Retry{ - NumRetries: 3, - }, - }, - Exec: &config.ExecConfig{ - RestartOnSecretChanges: "always", - Command: append(testAppCommand, testCase.testAppArgs...), - RestartStopSignal: testCase.testAppStopSignal, - }, - EnvTemplates: testCase.envTemplates, - TemplateConfig: &config.TemplateConfig{ - ExitOnRetryFailure: true, - StaticSecretRenderInt: testCase.staticSecretRenderInterval, - }, - }, - LogLevel: hclog.Trace, - LogWriter: hclog.DefaultOutput, - }) - - // start the exec server - var ( - execServerErrCh = make(chan error) - execServerTokenCh = make(chan string, 1) - ) - go func() { - execServerErrCh <- execServer.Run(ctx, execServerTokenCh) - }() - - // send a dummy token to kick off the server - execServerTokenCh <- "my-token" - - // ensure the test app is running after 3 seconds - var ( - testAppAddr = fmt.Sprintf("http://localhost:%d", testCase.testAppPort) - testAppStartedCh = make(chan error) - ) - if testCase.expectedError == nil { - time.AfterFunc(500*time.Millisecond, func() { - _, err := retryablehttp.Head(testAppAddr) - testAppStartedCh <- err - }) - } - - select { - case <-ctx.Done(): - 
t.Fatal("timeout reached before templates were rendered") - - case err := <-execServerErrCh: - if testCase.expectedError == nil && err != nil { - t.Fatalf("exec server did not expect an error, got: %v", err) - } - - if errors.Is(err, testCase.expectedError) { - t.Fatalf("exec server expected error %v; got %v", testCase.expectedError, err) - } - - t.Log("exec server exited without an error") - - return - - case err := <-testAppStartedCh: - if testCase.expectedError == nil && err != nil { - t.Fatalf("test app could not be started") - } - - t.Log("test app started successfully") - } - - // expect the test app to restart after staticSecretRenderInterval + debounce timer due to a password change - if testCase.staticSecretRenderInterval != 0 { - t.Logf("sleeping for %v to wait for application restart", testCase.staticSecretRenderInterval+5*time.Second) - time.Sleep(testCase.staticSecretRenderInterval + 5*time.Second) - } - - // simulate a shutdown of agent, which, in turn stops the test app - if testCase.simulateShutdown { - cancelContextFunc() - - time.Sleep(testCase.simulateShutdownWaitDuration) - - // check if the test app is still alive - if _, err := http.Head(testAppAddr); err == nil { - t.Fatalf("the test app is still alive %v after a simulated shutdown!", testCase.simulateShutdownWaitDuration) - } - - return - } - - // verify the environment variables - t.Logf("verifying test-app's environment variables") - - resp, err := retryablehttp.Get(testAppAddr) - if err != nil { - t.Fatalf("error making request to the test app: %s", err) - } - defer resp.Body.Close() - - decoder := json.NewDecoder(resp.Body) - var response struct { - EnvironmentVariables map[string]string `json:"environment_variables"` - ProcessID int `json:"process_id"` - } - if err := decoder.Decode(&response); err != nil { - t.Fatalf("unable to parse response from test app: %s", err) - } - - for key, expectedValue := range testCase.expected { - actualValue, ok := response.EnvironmentVariables[key] - if 
!ok { - t.Fatalf("expected the test app to return %q environment variable", key) - } - if expectedValue != actualValue { - t.Fatalf("expected environment variable %s to have a value of %q but it has a value of %q", key, expectedValue, actualValue) - } - } - }) - } -} diff --git a/command/agent/exec/test-app/main.go b/command/agent/exec/test-app/main.go deleted file mode 100644 index 311ac7eb657b6..0000000000000 --- a/command/agent/exec/test-app/main.go +++ /dev/null @@ -1,150 +0,0 @@ -package main - -// This is a test application that is used by TestExecServer_Run to verify -// the behavior of vault agent running as a process supervisor. -// -// The app will automatically exit after 1 minute or the --stop-after interval, -// whichever comes first. It also can serve its loaded environment variables on -// the given --port. This app will also return the given --exit-code and -// terminate on SIGTERM unless --use-sigusr1 is specified. - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "flag" - "fmt" - "log" - "net/http" - "os" - "os/signal" - "strings" - "syscall" - "time" -) - -var ( - port uint - ignoreStopSignal bool - sleepAfterStopSignal time.Duration - useSigusr1StopSignal bool - stopAfter time.Duration - exitCode int -) - -func init() { - flag.UintVar(&port, "port", 34000, "port to run the test app on") - flag.DurationVar(&sleepAfterStopSignal, "sleep-after-stop-signal", 1*time.Second, "time to sleep after getting the signal before exiting") - flag.BoolVar(&useSigusr1StopSignal, "use-sigusr1", false, "use SIGUSR1 as the stop signal, instead of the default SIGTERM") - flag.DurationVar(&stopAfter, "stop-after", 0, "stop the process after duration (overrides all other flags if set)") - flag.IntVar(&exitCode, "exit-code", 0, "exit code to return when this script exits") -} - -type Response struct { - EnvironmentVariables map[string]string `json:"environment_variables"` - ProcessID int `json:"process_id"` -} - -func newResponse() Response { - respEnv 
:= make(map[string]string, len(os.Environ())) - for _, envVar := range os.Environ() { - tokens := strings.Split(envVar, "=") - respEnv[tokens[0]] = tokens[1] - } - - return Response{ - EnvironmentVariables: respEnv, - ProcessID: os.Getpid(), - } -} - -func handler(w http.ResponseWriter, r *http.Request) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - if r.URL.Query().Get("pretty") == "1" { - encoder.SetIndent("", " ") - } - if err := encoder.Encode(newResponse()); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - _, _ = w.Write(buf.Bytes()) -} - -func main() { - logger := log.New(os.Stderr, "test-app: ", log.LstdFlags) - - if err := run(logger); err != nil { - log.Fatalf("error: %v\n", err) - } - - logger.Printf("exit code: %d\n", exitCode) - - os.Exit(exitCode) -} - -func run(logger *log.Logger) error { - /* */ logger.Println("run: started") - defer logger.Println("run: done") - - ctx, cancelContextFunc := context.WithTimeout(context.Background(), 60*time.Second) - defer cancelContextFunc() - - flag.Parse() - - server := http.Server{ - Addr: fmt.Sprintf(":%d", port), - Handler: http.HandlerFunc(handler), - ReadTimeout: 20 * time.Second, - WriteTimeout: 20 * time.Second, - IdleTimeout: 20 * time.Second, - } - - doneCh := make(chan struct{}) - - go func() { - defer close(doneCh) - - stopSignal := make(chan os.Signal, 1) - if useSigusr1StopSignal { - signal.Notify(stopSignal, syscall.SIGUSR1) - } else { - signal.Notify(stopSignal, syscall.SIGTERM) - } - - select { - case <-ctx.Done(): - logger.Println("context done: exiting") - - case s := <-stopSignal: - logger.Printf("signal %q: received\n", s) - - if sleepAfterStopSignal > 0 { - logger.Printf("signal %q: sleeping for %v simulate cleanup\n", s, sleepAfterStopSignal) - time.Sleep(sleepAfterStopSignal) - } - - case <-time.After(stopAfter): - logger.Printf("stopping after: %v\n", 
stopAfter) - } - - if err := server.Shutdown(context.Background()); err != nil { - log.Printf("server shutdown error: %v", err) - } - }() - - logger.Printf("server %s: started\n", server.Addr) - - if err := server.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { - return fmt.Errorf("could not start the server: %v", err) - } - - logger.Printf("server %s: done\n", server.Addr) - - <-doneCh - - return nil -} diff --git a/command/agent/internal/ctmanager/runner_config.go b/command/agent/internal/ctmanager/runner_config.go deleted file mode 100644 index 90974981771e8..0000000000000 --- a/command/agent/internal/ctmanager/runner_config.go +++ /dev/null @@ -1,149 +0,0 @@ -package ctmanager - -import ( - "fmt" - "io" - "strings" - - ctconfig "github.com/hashicorp/consul-template/config" - ctlogging "github.com/hashicorp/consul-template/logging" - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/sdk/helper/pointerutil" -) - -type ManagerConfig struct { - AgentConfig *config.Config - Namespace string - LogLevel hclog.Level - LogWriter io.Writer -} - -// NewConfig returns a consul-template runner configuration, setting the -// Vault and Consul configurations based on the clients configs. 
-func NewConfig(mc ManagerConfig, templates ctconfig.TemplateConfigs) (*ctconfig.Config, error) { - conf := ctconfig.DefaultConfig() - conf.Templates = templates.Copy() - - // Setup the Vault config - // Always set these to ensure nothing is picked up from the environment - conf.Vault.RenewToken = pointerutil.BoolPtr(false) - conf.Vault.Token = pointerutil.StringPtr("") - conf.Vault.Address = &mc.AgentConfig.Vault.Address - - if mc.Namespace != "" { - conf.Vault.Namespace = &mc.Namespace - } - - if mc.AgentConfig.TemplateConfig != nil && mc.AgentConfig.TemplateConfig.StaticSecretRenderInt != 0 { - conf.Vault.DefaultLeaseDuration = &mc.AgentConfig.TemplateConfig.StaticSecretRenderInt - } - - if mc.AgentConfig.DisableIdleConnsTemplating { - idleConns := -1 - conf.Vault.Transport.MaxIdleConns = &idleConns - } - - if mc.AgentConfig.DisableKeepAlivesTemplating { - conf.Vault.Transport.DisableKeepAlives = pointerutil.BoolPtr(true) - } - - conf.Vault.SSL = &ctconfig.SSLConfig{ - Enabled: pointerutil.BoolPtr(false), - Verify: pointerutil.BoolPtr(false), - Cert: pointerutil.StringPtr(""), - Key: pointerutil.StringPtr(""), - CaCert: pointerutil.StringPtr(""), - CaPath: pointerutil.StringPtr(""), - ServerName: pointerutil.StringPtr(""), - } - - // If Vault.Retry isn't specified, use the default of 12 retries. - // This retry value will be respected regardless of if we use the cache. - attempts := ctconfig.DefaultRetryAttempts - if mc.AgentConfig.Vault != nil && mc.AgentConfig.Vault.Retry != nil { - attempts = mc.AgentConfig.Vault.Retry.NumRetries - } - - // Use the cache if available or fallback to the Vault server values. 
- if mc.AgentConfig.Cache != nil { - if mc.AgentConfig.Cache.InProcDialer == nil { - return nil, fmt.Errorf("missing in-process dialer configuration") - } - if conf.Vault.Transport == nil { - conf.Vault.Transport = &ctconfig.TransportConfig{} - } - conf.Vault.Transport.CustomDialer = mc.AgentConfig.Cache.InProcDialer - // The in-process dialer ignores the address passed in, but we're still - // setting it here to override the setting at the top of this function, - // and to prevent the vault/http client from defaulting to https. - conf.Vault.Address = pointerutil.StringPtr("http://127.0.0.1:8200") - } else if strings.HasPrefix(mc.AgentConfig.Vault.Address, "https") || mc.AgentConfig.Vault.CACert != "" { - skipVerify := mc.AgentConfig.Vault.TLSSkipVerify - verify := !skipVerify - conf.Vault.SSL = &ctconfig.SSLConfig{ - Enabled: pointerutil.BoolPtr(true), - Verify: &verify, - Cert: &mc.AgentConfig.Vault.ClientCert, - Key: &mc.AgentConfig.Vault.ClientKey, - CaCert: &mc.AgentConfig.Vault.CACert, - CaPath: &mc.AgentConfig.Vault.CAPath, - ServerName: &mc.AgentConfig.Vault.TLSServerName, - } - } - enabled := attempts > 0 - conf.Vault.Retry = &ctconfig.RetryConfig{ - Attempts: &attempts, - Enabled: &enabled, - } - - // Sync Consul Template's retry with user set auto-auth initial backoff value. - // This is helpful if Auto Auth cannot get a new token and CT is trying to fetch - // secrets. 
- if mc.AgentConfig.AutoAuth != nil && mc.AgentConfig.AutoAuth.Method != nil { - if mc.AgentConfig.AutoAuth.Method.MinBackoff > 0 { - conf.Vault.Retry.Backoff = &mc.AgentConfig.AutoAuth.Method.MinBackoff - } - - if mc.AgentConfig.AutoAuth.Method.MaxBackoff > 0 { - conf.Vault.Retry.MaxBackoff = &mc.AgentConfig.AutoAuth.Method.MaxBackoff - } - } - - conf.Finalize() - - // setup log level from TemplateServer config - conf.LogLevel = logLevelToStringPtr(mc.LogLevel) - - if err := ctlogging.Setup(&ctlogging.Config{ - Level: *conf.LogLevel, - Writer: mc.LogWriter, - }); err != nil { - return nil, err - } - return conf, nil -} - -// logLevelToString converts a go-hclog level to a matching, uppercase string -// value. It's used to convert Vault Agent's hclog level to a string version -// suitable for use in Consul Template's runner configuration input. -func logLevelToStringPtr(level hclog.Level) *string { - // consul template's default level is WARN, but Vault Agent's default is INFO, - // so we use that for the Runner's default. - var levelStr string - - switch level { - case hclog.Trace: - levelStr = "TRACE" - case hclog.Debug: - levelStr = "DEBUG" - case hclog.Warn: - levelStr = "WARN" - case hclog.Error: - levelStr = "ERR" - default: - levelStr = "INFO" - } - return pointerutil.StringPtr(levelStr) -} diff --git a/command/agent/jwt_end_to_end_test.go b/command/agent/jwt_end_to_end_test.go index 4739a65c314a4..c2d74d9f37dc0 100644 --- a/command/agent/jwt_end_to_end_test.go +++ b/command/agent/jwt_end_to_end_test.go @@ -1,12 +1,9 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package agent import ( "context" "encoding/json" - "fmt" + "io/ioutil" "os" "testing" "time" @@ -14,10 +11,10 @@ import ( hclog "github.com/hashicorp/go-hclog" vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - agentjwt "github.com/hashicorp/vault/command/agentproxyshared/auth/jwt" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agent/auth" + agentjwt "github.com/hashicorp/vault/command/agent/auth/jwt" + "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agent/sink/file" "github.com/hashicorp/vault/helper/dhutil" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/jsonutil" @@ -27,32 +24,11 @@ import ( ) func TestJWTEndToEnd(t *testing.T) { - t.Parallel() - testCases := []struct { - ahWrapping bool - useSymlink bool - removeJWTAfterReading bool - }{ - {false, false, false}, - {true, false, false}, - {false, true, false}, - {true, true, false}, - {false, false, true}, - {true, false, true}, - {false, true, true}, - {true, true, true}, - } - - for _, tc := range testCases { - tc := tc // capture range variable - t.Run(fmt.Sprintf("ahWrapping=%v, useSymlink=%v, removeJWTAfterReading=%v", tc.ahWrapping, tc.useSymlink, tc.removeJWTAfterReading), func(t *testing.T) { - t.Parallel() - testJWTEndToEnd(t, tc.ahWrapping, tc.useSymlink, tc.removeJWTAfterReading) - }) - } + testJWTEndToEnd(t, false) + testJWTEndToEnd(t, true) } -func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading bool) { +func testJWTEndToEnd(t *testing.T, ahWrapping bool) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ Logger: logger, @@ -107,24 +83,16 @@ func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, 
removeJWTAfterReading // We close these right away because we're just basically testing // permissions and finding a usable file name - inf, err := os.CreateTemp("", "auth.jwt.test.") + inf, err := ioutil.TempFile("", "auth.jwt.test.") if err != nil { t.Fatal(err) } in := inf.Name() inf.Close() os.Remove(in) - symlink, err := os.CreateTemp("", "auth.jwt.symlink.test.") - if err != nil { - t.Fatal(err) - } - symlinkName := symlink.Name() - symlink.Close() - os.Remove(symlinkName) - os.Symlink(in, symlinkName) t.Logf("input: %s", in) - ouf, err := os.CreateTemp("", "auth.tokensink.test.") + ouf, err := ioutil.TempFile("", "auth.tokensink.test.") if err != nil { t.Fatal(err) } @@ -133,7 +101,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading os.Remove(out) t.Logf("output: %s", out) - dhpathf, err := os.CreateTemp("", "auth.dhpath.test.") + dhpathf, err := ioutil.TempFile("", "auth.dhpath.test.") if err != nil { t.Fatal(err) } @@ -148,7 +116,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading if err != nil { t.Fatal(err) } - if err := os.WriteFile(dhpath, mPubKey, 0o600); err != nil { + if err := ioutil.WriteFile(dhpath, mPubKey, 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote dh param file", "path", dhpath) @@ -156,21 +124,12 @@ func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - var fileNameToUseAsPath string - if useSymlink { - fileNameToUseAsPath = symlinkName - } else { - fileNameToUseAsPath = in - } am, err := agentjwt.NewJWTAuthMethod(&auth.AuthConfig{ Logger: logger.Named("auth.jwt"), MountPath: "auth/jwt", Config: map[string]interface{}{ - "path": fileNameToUseAsPath, - "role": "test", - "remove_jwt_after_reading": removeJWTAfterReading, - "remove_jwt_follows_symlinks": true, - "jwt_read_period": "0.5s", + "path": in, + "role": "test", }, }) if err != nil { @@ -266,8 +225,7 
@@ func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading // Get a token jwtToken, _ := GetTestJWT(t) - - if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + if err := ioutil.WriteFile(in, []byte(jwtToken), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test jwt", "path", in) @@ -279,29 +237,13 @@ func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading if time.Now().After(timeout) { t.Fatal("did not find a written token after timeout") } - val, err := os.ReadFile(out) + val, err := ioutil.ReadFile(out) if err == nil { os.Remove(out) if len(val) == 0 { t.Fatal("written token was empty") } - // First, ensure JWT has been removed - if removeJWTAfterReading { - _, err = os.Stat(in) - if err == nil { - t.Fatal("no error returned from stat, indicating the jwt is still present") - } - if !os.IsNotExist(err) { - t.Fatalf("unexpected error: %v", err) - } - } else { - _, err := os.Stat(in) - if err != nil { - t.Fatal("JWT file removed despite removeJWTAfterReading being set to false") - } - } - // First decrypt it resp := new(dhutil.Envelope) if err := jsonutil.DecodeJSON(val, resp); err != nil { @@ -394,7 +336,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading // Get another token to test the backend pushing the need to authenticate // to the handler jwtToken, _ = GetTestJWT(t) - if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + if err := ioutil.WriteFile(in, []byte(jwtToken), 0o600); err != nil { t.Fatal(err) } diff --git a/command/agent/oci_end_to_end_test.go b/command/agent/oci_end_to_end_test.go deleted file mode 100644 index 2349f09abe8d1..0000000000000 --- a/command/agent/oci_end_to_end_test.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package agent - -import ( - "context" - "io/ioutil" - "os" - "testing" - "time" - - hclog "github.com/hashicorp/go-hclog" - vaultoci "github.com/hashicorp/vault-plugin-auth-oci" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - agentoci "github.com/hashicorp/vault/command/agentproxyshared/auth/oci" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" - "github.com/hashicorp/vault/helper/testhelpers" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" -) - -const ( - envVarOCITestTenancyOCID = "OCI_TEST_TENANCY_OCID" - envVarOCITestUserOCID = "OCI_TEST_USER_OCID" - envVarOCITestFingerprint = "OCI_TEST_FINGERPRINT" - envVarOCITestPrivateKeyPath = "OCI_TEST_PRIVATE_KEY_PATH" - envVAROCITestOCIDList = "OCI_TEST_OCID_LIST" - - // The OCI SDK doesn't export its standard env vars so they're captured here. - // These are used for the duration of the test to make sure the agent is able to - // pick up creds from the env. - // - // To run this test, do not set these. Only the above ones need to be set. - envVarOCITenancyOCID = "OCI_tenancy_ocid" - envVarOCIUserOCID = "OCI_user_ocid" - envVarOCIFingerprint = "OCI_fingerprint" - envVarOCIPrivateKeyPath = "OCI_private_key_path" -) - -func TestOCIEndToEnd(t *testing.T) { - if !runAcceptanceTests { - t.SkipNow() - } - - // Ensure each cred is populated. 
- credNames := []string{ - envVarOCITestTenancyOCID, - envVarOCITestUserOCID, - envVarOCITestFingerprint, - envVarOCITestPrivateKeyPath, - envVAROCITestOCIDList, - } - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - - logger := logging.NewVaultLogger(hclog.Trace) - coreConfig := &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "oci": vaultoci.Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - client := cluster.Cores[0].Client - - // Setup Vault - if err := client.Sys().EnableAuthWithOptions("oci", &api.EnableAuthOptions{ - Type: "oci", - }); err != nil { - t.Fatal(err) - } - - if _, err := client.Logical().Write("auth/oci/config", map[string]interface{}{ - "home_tenancy_id": os.Getenv(envVarOCITestTenancyOCID), - }); err != nil { - t.Fatal(err) - } - - if _, err := client.Logical().Write("auth/oci/role/test", map[string]interface{}{ - "ocid_list": os.Getenv(envVAROCITestOCIDList), - }); err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - - // We're going to feed oci auth creds via env variables. 
- if err := setOCIEnvCreds(); err != nil { - t.Fatal(err) - } - defer func() { - if err := unsetOCIEnvCreds(); err != nil { - t.Fatal(err) - } - }() - - vaultAddr := "http://" + cluster.Cores[0].Listeners[0].Addr().String() - - am, err := agentoci.NewOCIAuthMethod(&auth.AuthConfig{ - Logger: logger.Named("auth.oci"), - MountPath: "auth/oci", - Config: map[string]interface{}{ - "type": "apikey", - "role": "test", - }, - }, vaultAddr) - if err != nil { - t.Fatal(err) - } - - ahConfig := &auth.AuthHandlerConfig{ - Logger: logger.Named("auth.handler"), - Client: client, - } - - ah := auth.NewAuthHandler(ahConfig) - errCh := make(chan error) - go func() { - errCh <- ah.Run(ctx, am) - }() - defer func() { - select { - case <-ctx.Done(): - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } - }() - - tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.") - if err != nil { - t.Fatal(err) - } - tokenSinkFileName := tmpFile.Name() - tmpFile.Close() - os.Remove(tokenSinkFileName) - t.Logf("output: %s", tokenSinkFileName) - - config := &sink.SinkConfig{ - Logger: logger.Named("sink.file"), - Config: map[string]interface{}{ - "path": tokenSinkFileName, - }, - WrapTTL: 10 * time.Second, - } - - fs, err := file.NewFileSink(config) - if err != nil { - t.Fatal(err) - } - config.Sink = fs - - ss := sink.NewSinkServer(&sink.SinkServerConfig{ - Logger: logger.Named("sink.server"), - Client: client, - }) - go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) - }() - defer func() { - select { - case <-ctx.Done(): - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } - }() - - // This has to be after the other defers so it happens first. It allows - // successful test runs to immediately cancel all of the runner goroutines - // and unblock any of the blocking defer calls by the runner's DoneCh that - // comes before this and avoid successful tests from taking the entire - // timeout duration. 
- defer cancel() - - if stat, err := os.Lstat(tokenSinkFileName); err == nil { - t.Fatalf("expected err but got %s", stat) - } else if !os.IsNotExist(err) { - t.Fatal("expected notexist err") - } - - // Wait 2 seconds for the env variables to be detected and an auth to be generated. - time.Sleep(time.Second * 2) - - token, err := readToken(tokenSinkFileName) - if err != nil { - t.Fatal(err) - } - - if token.Token == "" { - t.Fatal("expected token but didn't receive it") - } -} - -func setOCIEnvCreds() error { - if err := os.Setenv(envVarOCITenancyOCID, os.Getenv(envVarOCITestTenancyOCID)); err != nil { - return err - } - if err := os.Setenv(envVarOCIUserOCID, os.Getenv(envVarOCITestUserOCID)); err != nil { - return err - } - if err := os.Setenv(envVarOCIFingerprint, os.Getenv(envVarOCITestFingerprint)); err != nil { - return err - } - return os.Setenv(envVarOCIPrivateKeyPath, os.Getenv(envVarOCITestPrivateKeyPath)) -} - -func unsetOCIEnvCreds() error { - if err := os.Unsetenv(envVarOCITenancyOCID); err != nil { - return err - } - if err := os.Unsetenv(envVarOCIUserOCID); err != nil { - return err - } - if err := os.Unsetenv(envVarOCIFingerprint); err != nil { - return err - } - return os.Unsetenv(envVarOCIPrivateKeyPath) -} diff --git a/command/agentproxyshared/sink/file/file_sink.go b/command/agent/sink/file/file_sink.go similarity index 96% rename from command/agentproxyshared/sink/file/file_sink.go rename to command/agent/sink/file/file_sink.go index c25d99169aa01..f2faf5641797d 100644 --- a/command/agentproxyshared/sink/file/file_sink.go +++ b/command/agent/sink/file/file_sink.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package file import ( @@ -12,7 +9,7 @@ import ( hclog "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agent/sink" ) // fileSink is a Sink implementation that writes a token to a file diff --git a/command/agentproxyshared/sink/file/file_sink_test.go b/command/agent/sink/file/file_sink_test.go similarity index 95% rename from command/agentproxyshared/sink/file/file_sink_test.go rename to command/agent/sink/file/file_sink_test.go index 95db8df19b72b..9749522b49310 100644 --- a/command/agentproxyshared/sink/file/file_sink_test.go +++ b/command/agent/sink/file/file_sink_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package file import ( @@ -12,7 +9,7 @@ import ( hclog "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agent/sink" "github.com/hashicorp/vault/sdk/helper/logging" ) diff --git a/command/agentproxyshared/sink/file/sink_test.go b/command/agent/sink/file/sink_test.go similarity index 95% rename from command/agentproxyshared/sink/file/sink_test.go rename to command/agent/sink/file/sink_test.go index de074003bfd37..839340f0c88d9 100644 --- a/command/agentproxyshared/sink/file/sink_test.go +++ b/command/agent/sink/file/sink_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package file import ( @@ -15,7 +12,7 @@ import ( hclog "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agent/sink" "github.com/hashicorp/vault/sdk/helper/logging" ) diff --git a/command/agentproxyshared/sink/inmem/inmem_sink.go b/command/agent/sink/inmem/inmem_sink.go similarity index 81% rename from command/agentproxyshared/sink/inmem/inmem_sink.go rename to command/agent/sink/inmem/inmem_sink.go index e5804d884bad4..2dfa09115ca73 100644 --- a/command/agentproxyshared/sink/inmem/inmem_sink.go +++ b/command/agent/sink/inmem/inmem_sink.go @@ -1,14 +1,11 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package inmem import ( "errors" hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agentproxyshared/cache" - "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agent/cache" + "github.com/hashicorp/vault/command/agent/sink" "go.uber.org/atomic" ) diff --git a/command/agent/sink/mock/mock_sink.go b/command/agent/sink/mock/mock_sink.go new file mode 100644 index 0000000000000..fb4720cc17ab2 --- /dev/null +++ b/command/agent/sink/mock/mock_sink.go @@ -0,0 +1,24 @@ +package mock + +import ( + "github.com/hashicorp/vault/command/agent/sink" +) + +type mockSink struct { + token string +} + +func NewSink(token string) sink.Sink { + return &mockSink{ + token: token, + } +} + +func (m *mockSink) WriteToken(token string) error { + m.token = token + return nil +} + +func (m *mockSink) Token() string { + return m.token +} diff --git a/command/agentproxyshared/sink/sink.go b/command/agent/sink/sink.go similarity index 97% rename from command/agentproxyshared/sink/sink.go rename to command/agent/sink/sink.go index 2b64c1762781c..75ea91dc306b6 100644 --- a/command/agentproxyshared/sink/sink.go +++ b/command/agent/sink/sink.go @@ -1,6 
+1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package sink import ( @@ -154,12 +151,10 @@ func (ss *SinkServer) Run(ctx context.Context, incoming chan string, sinks []*Si if err := writeSink(st.sink, st.token); err != nil { backoff := 2*time.Second + time.Duration(ss.random.Int63()%int64(time.Second*2)-int64(time.Second)) ss.logger.Error("error returned by sink function, retrying", "error", err, "backoff", backoff.String()) - timer := time.NewTimer(backoff) select { case <-ctx.Done(): - timer.Stop() return nil - case <-timer.C: + case <-time.After(backoff): atomic.AddInt32(ss.remaining, 1) sinkCh <- st } diff --git a/command/agent/template/template.go b/command/agent/template/template.go index be3ccc4eab64d..32e9022bb78bd 100644 --- a/command/agent/template/template.go +++ b/command/agent/template/template.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Package template is responsible for rendering user supplied templates to // disk. The Server type accepts configuration to communicate to a Vault server // and a Vault token for authentication. 
Internally, the Server creates a Consul @@ -13,16 +10,15 @@ import ( "errors" "fmt" "io" + "strings" "go.uber.org/atomic" ctconfig "github.com/hashicorp/consul-template/config" + ctlogging "github.com/hashicorp/consul-template/logging" "github.com/hashicorp/consul-template/manager" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/command/agent/internal/ctmanager" - "github.com/hashicorp/vault/helper/useragent" "github.com/hashicorp/vault/sdk/helper/pointerutil" ) @@ -111,14 +107,8 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct // configuration var runnerConfig *ctconfig.Config var runnerConfigErr error - managerConfig := ctmanager.ManagerConfig{ - AgentConfig: ts.config.AgentConfig, - Namespace: ts.config.Namespace, - LogLevel: ts.config.LogLevel, - LogWriter: ts.config.LogWriter, - } - runnerConfig, runnerConfigErr = ctmanager.NewConfig(managerConfig, templates) - if runnerConfigErr != nil { + + if runnerConfig, runnerConfigErr = newRunnerConfig(ts.config, templates); runnerConfigErr != nil { return fmt.Errorf("template server failed to runner generate config: %w", runnerConfigErr) } @@ -167,8 +157,7 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct *latestToken = token ctv := ctconfig.Config{ Vault: &ctconfig.VaultConfig{ - Token: latestToken, - ClientUserAgent: pointerutil.StringPtr(useragent.AgentTemplatingString()), + Token: latestToken, }, } @@ -235,3 +224,131 @@ func (ts *Server) Stop() { close(ts.DoneCh) } } + +// newRunnerConfig returns a consul-template runner configuration, setting the +// Vault and Consul configurations based on the clients configs. 
+func newRunnerConfig(sc *ServerConfig, templates ctconfig.TemplateConfigs) (*ctconfig.Config, error) { + conf := ctconfig.DefaultConfig() + conf.Templates = templates.Copy() + + // Setup the Vault config + // Always set these to ensure nothing is picked up from the environment + conf.Vault.RenewToken = pointerutil.BoolPtr(false) + conf.Vault.Token = pointerutil.StringPtr("") + conf.Vault.Address = &sc.AgentConfig.Vault.Address + + if sc.Namespace != "" { + conf.Vault.Namespace = &sc.Namespace + } + + if sc.AgentConfig.TemplateConfig != nil && sc.AgentConfig.TemplateConfig.StaticSecretRenderInt != 0 { + conf.Vault.DefaultLeaseDuration = &sc.AgentConfig.TemplateConfig.StaticSecretRenderInt + } + + if sc.AgentConfig.DisableIdleConnsTemplating { + idleConns := -1 + conf.Vault.Transport.MaxIdleConns = &idleConns + } + + if sc.AgentConfig.DisableKeepAlivesTemplating { + conf.Vault.Transport.DisableKeepAlives = pointerutil.BoolPtr(true) + } + + conf.Vault.SSL = &ctconfig.SSLConfig{ + Enabled: pointerutil.BoolPtr(false), + Verify: pointerutil.BoolPtr(false), + Cert: pointerutil.StringPtr(""), + Key: pointerutil.StringPtr(""), + CaCert: pointerutil.StringPtr(""), + CaPath: pointerutil.StringPtr(""), + ServerName: pointerutil.StringPtr(""), + } + + // We need to assign something to Vault.Retry or it will use its default of 12 retries. + // This retry value will be respected regardless of if we use the cache. + var attempts int + if sc.AgentConfig.Vault != nil && sc.AgentConfig.Vault.Retry != nil { + attempts = sc.AgentConfig.Vault.Retry.NumRetries + } + + // Use the cache if available or fallback to the Vault server values. 
+ if sc.AgentConfig.Cache != nil { + if sc.AgentConfig.Cache.InProcDialer == nil { + return nil, fmt.Errorf("missing in-process dialer configuration") + } + if conf.Vault.Transport == nil { + conf.Vault.Transport = &ctconfig.TransportConfig{} + } + conf.Vault.Transport.CustomDialer = sc.AgentConfig.Cache.InProcDialer + // The in-process dialer ignores the address passed in, but we're still + // setting it here to override the setting at the top of this function, + // and to prevent the vault/http client from defaulting to https. + conf.Vault.Address = pointerutil.StringPtr("http://127.0.0.1:8200") + } else if strings.HasPrefix(sc.AgentConfig.Vault.Address, "https") || sc.AgentConfig.Vault.CACert != "" { + skipVerify := sc.AgentConfig.Vault.TLSSkipVerify + verify := !skipVerify + conf.Vault.SSL = &ctconfig.SSLConfig{ + Enabled: pointerutil.BoolPtr(true), + Verify: &verify, + Cert: &sc.AgentConfig.Vault.ClientCert, + Key: &sc.AgentConfig.Vault.ClientKey, + CaCert: &sc.AgentConfig.Vault.CACert, + CaPath: &sc.AgentConfig.Vault.CAPath, + ServerName: &sc.AgentConfig.Vault.TLSServerName, + } + } + enabled := attempts > 0 + conf.Vault.Retry = &ctconfig.RetryConfig{ + Attempts: &attempts, + Enabled: &enabled, + } + + // Sync Consul Template's retry with user set auto-auth initial backoff value. + // This is helpful if Auto Auth cannot get a new token and CT is trying to fetch + // secrets. 
+ if sc.AgentConfig.AutoAuth != nil && sc.AgentConfig.AutoAuth.Method != nil { + if sc.AgentConfig.AutoAuth.Method.MinBackoff > 0 { + conf.Vault.Retry.Backoff = &sc.AgentConfig.AutoAuth.Method.MinBackoff + } + + if sc.AgentConfig.AutoAuth.Method.MaxBackoff > 0 { + conf.Vault.Retry.MaxBackoff = &sc.AgentConfig.AutoAuth.Method.MaxBackoff + } + } + + conf.Finalize() + + // setup log level from TemplateServer config + conf.LogLevel = logLevelToStringPtr(sc.LogLevel) + + if err := ctlogging.Setup(&ctlogging.Config{ + Level: *conf.LogLevel, + Writer: sc.LogWriter, + }); err != nil { + return nil, err + } + return conf, nil +} + +// logLevelToString converts a go-hclog level to a matching, uppercase string +// value. It's used to convert Vault Agent's hclog level to a string version +// suitable for use in Consul Template's runner configuration input. +func logLevelToStringPtr(level hclog.Level) *string { + // consul template's default level is WARN, but Vault Agent's default is INFO, + // so we use that for the Runner's default. + var levelStr string + + switch level { + case hclog.Trace: + levelStr = "TRACE" + case hclog.Debug: + levelStr = "DEBUG" + case hclog.Warn: + levelStr = "WARN" + case hclog.Error: + levelStr = "ERR" + default: + levelStr = "INFO" + } + return pointerutil.StringPtr(levelStr) +} diff --git a/command/agent/template/template_test.go b/command/agent/template/template_test.go index 61822bc5bd3b0..25999945f99d3 100644 --- a/command/agent/template/template_test.go +++ b/command/agent/template/template_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package template import ( @@ -17,8 +14,6 @@ import ( ctconfig "github.com/hashicorp/consul-template/config" "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/command/agent/internal/ctmanager" - "github.com/hashicorp/vault/command/agentproxyshared" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" "github.com/hashicorp/vault/sdk/helper/logging" @@ -28,14 +23,6 @@ import ( "google.golang.org/grpc/test/bufconn" ) -func newRunnerConfig(s *ServerConfig, configs ctconfig.TemplateConfigs) (*ctconfig.Config, error) { - managerCfg := ctmanager.ManagerConfig{ - AgentConfig: s.AgentConfig, - } - cfg, err := ctmanager.NewConfig(managerCfg, configs) - return cfg, err -} - // TestNewServer is a simple test to make sure NewServer returns a Server and // channel func TestNewServer(t *testing.T) { @@ -88,7 +75,7 @@ func newAgentConfig(listeners []*configutil.Listener, enableCache, enablePersise } if enablePersisentCache { - agentConfig.Cache.Persist = &agentproxyshared.PersistConfig{Type: "kubernetes"} + agentConfig.Cache.Persist = &config.Persist{Type: "kubernetes"} } return agentConfig diff --git a/command/agent/test-fixtures/reload/reload_bar.key b/command/agent/test-fixtures/reload/reload_bar.key deleted file mode 100644 index 10849fbe1d7f3..0000000000000 --- a/command/agent/test-fixtures/reload/reload_bar.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju -Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj -7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl -/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz -q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7 -XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ 
-ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF -V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q -g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ -zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt -V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC -is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS -Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU -8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB -1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L -m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti -y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/ -XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z -kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7 -qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX -Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft -b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT -9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH -4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab -JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil ------END RSA PRIVATE KEY----- diff --git a/command/agent/test-fixtures/reload/reload_bar.pem b/command/agent/test-fixtures/reload/reload_bar.pem deleted file mode 100644 index a8217be5c7dfd..0000000000000 --- a/command/agent/test-fixtures/reload/reload_bar.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL -BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw -MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon -mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm -MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy 
-uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e -e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c -NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw -gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F -7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl -vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL -BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw -SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP -UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC -a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q -W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj -RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY= ------END CERTIFICATE----- diff --git a/command/agent/test-fixtures/reload/reload_ca.pem b/command/agent/test-fixtures/reload/reload_ca.pem deleted file mode 100644 index 72a74440c4820..0000000000000 --- a/command/agent/test-fixtures/reload/reload_ca.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL -BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw -MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k -JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM -SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+ -VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/ -9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad -KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD -VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb -U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG -A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9 -hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX -Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf 
-oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8 -Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a -mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895 -XRz2GCwCNyvW ------END CERTIFICATE----- diff --git a/command/agent/test-fixtures/reload/reload_foo.key b/command/agent/test-fixtures/reload/reload_foo.key deleted file mode 100644 index 86e6cce63e647..0000000000000 --- a/command/agent/test-fixtures/reload/reload_foo.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i -ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX -xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A -A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc -gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g -Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV -I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io -yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds -a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey -szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX -Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU -02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK -BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ -LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa -69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L -M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1 -Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV -gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/ -p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X -PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/ -3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO -FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3 
-bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT -jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa -5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P ------END RSA PRIVATE KEY----- diff --git a/command/agent/test-fixtures/reload/reload_foo.pem b/command/agent/test-fixtures/reload/reload_foo.pem deleted file mode 100644 index c8b868bcd0f08..0000000000000 --- a/command/agent/test-fixtures/reload/reload_foo.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL -BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw -MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D -j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1 -bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6 -EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58 -sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l -8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw -gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ -dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl -vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL -BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy -fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc -sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh -RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2 -oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene -Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE= ------END CERTIFICATE----- diff --git a/command/agent/testing.go b/command/agent/testing.go index 04a2f0608a6c6..d4de988a98575 100644 --- a/command/agent/testing.go +++ b/command/agent/testing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package agent import ( @@ -9,14 +6,14 @@ import ( "crypto/x509" "encoding/json" "encoding/pem" + "io/ioutil" "os" "testing" "time" - "github.com/go-jose/go-jose/v3" - "github.com/go-jose/go-jose/v3/jwt" - "github.com/hashicorp/vault/sdk/logical" + jose "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" ) const envVarRunAccTests = "VAULT_ACC" @@ -64,7 +61,7 @@ func GetTestJWT(t *testing.T) (string, *ecdsa.PrivateKey) { } func readToken(fileName string) (*logical.HTTPWrapInfo, error) { - b, err := os.ReadFile(fileName) + b, err := ioutil.ReadFile(fileName) if err != nil { return nil, err } diff --git a/command/agent/token_file_end_to_end_test.go b/command/agent/token_file_end_to_end_test.go deleted file mode 100644 index dc7115cb18f4c..0000000000000 --- a/command/agent/token_file_end_to_end_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package agent - -import ( - "context" - "os" - "path/filepath" - "testing" - "time" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/vault" -) - -func TestTokenFileEndToEnd(t *testing.T) { - var err error - logger := logging.NewVaultLogger(log.Trace) - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), - } - - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - - vault.TestWaitActive(t, cores[0].Core) - - client := cores[0].Client - - secret, err 
:= client.Auth().Token().Create(nil) - if err != nil || secret == nil { - t.Fatal(err) - } - - tokenFile, err := os.Create(filepath.Join(t.TempDir(), "token_file")) - if err != nil { - t.Fatal(err) - } - tokenFileName := tokenFile.Name() - tokenFile.Close() // WriteFile doesn't need it open - os.WriteFile(tokenFileName, []byte(secret.Auth.ClientToken), 0o666) - defer os.Remove(tokenFileName) - - ahConfig := &auth.AuthHandlerConfig{ - Logger: logger.Named("auth.handler"), - Client: client, - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - - am, err := token_file.NewTokenFileAuthMethod(&auth.AuthConfig{ - Logger: logger.Named("auth.method"), - Config: map[string]interface{}{ - "token_file_path": tokenFileName, - }, - }) - if err != nil { - t.Fatal(err) - } - - ah := auth.NewAuthHandler(ahConfig) - errCh := make(chan error) - go func() { - errCh <- ah.Run(ctx, am) - }() - defer func() { - select { - case <-ctx.Done(): - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } - }() - - // We close these right away because we're just basically testing - // permissions and finding a usable file name - sinkFile, err := os.Create(filepath.Join(t.TempDir(), "auth.tokensink.test.")) - if err != nil { - t.Fatal(err) - } - tokenSinkFileName := sinkFile.Name() - sinkFile.Close() - os.Remove(tokenSinkFileName) - t.Logf("output: %s", tokenSinkFileName) - - config := &sink.SinkConfig{ - Logger: logger.Named("sink.file"), - Config: map[string]interface{}{ - "path": tokenSinkFileName, - }, - WrapTTL: 10 * time.Second, - } - - fs, err := file.NewFileSink(config) - if err != nil { - t.Fatal(err) - } - config.Sink = fs - - ss := sink.NewSinkServer(&sink.SinkServerConfig{ - Logger: logger.Named("sink.server"), - Client: client, - }) - go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) - }() - defer func() { - select { - case <-ctx.Done(): - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } - }() - - // This has 
to be after the other defers, so it happens first. It allows - // successful test runs to immediately cancel all of the runner goroutines - // and unblock any of the blocking defer calls by the runner's DoneCh that - // comes before this and avoid successful tests from taking the entire - // timeout duration. - defer cancel() - - if stat, err := os.Lstat(tokenSinkFileName); err == nil { - t.Fatalf("expected err but got %s", stat) - } else if !os.IsNotExist(err) { - t.Fatal("expected notexist err") - } - - // Wait 2 seconds for the env variables to be detected and an auth to be generated. - time.Sleep(time.Second * 2) - - token, err := readToken(tokenSinkFileName) - if err != nil { - t.Fatal(err) - } - - if token.Token == "" { - t.Fatal("expected token but didn't receive it") - } - - _, err = os.Stat(tokenFileName) - if err != nil { - t.Fatal("Token file removed") - } -} diff --git a/command/agentproxyshared/winsvc/service.go b/command/agent/winsvc/service.go similarity index 76% rename from command/agentproxyshared/winsvc/service.go rename to command/agent/winsvc/service.go index edd234e0c57d2..c8d21f5c7d0af 100644 --- a/command/agentproxyshared/winsvc/service.go +++ b/command/agent/winsvc/service.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package winsvc var chanGraceExit = make(chan int) diff --git a/command/agentproxyshared/winsvc/service_windows.go b/command/agent/winsvc/service_windows.go similarity index 93% rename from command/agentproxyshared/winsvc/service_windows.go rename to command/agent/winsvc/service_windows.go index bb16bf97aeea2..69177e01fd66d 100644 --- a/command/agentproxyshared/winsvc/service_windows.go +++ b/command/agent/winsvc/service_windows.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - //go:build windows package winsvc diff --git a/command/agent_generate_config.go b/command/agent_generate_config.go deleted file mode 100644 index 5c42d0e59cf8c..0000000000000 --- a/command/agent_generate_config.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "context" - "fmt" - "io" - "os" - paths "path" - "sort" - "strings" - "unicode" - - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclwrite" - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/mitchellh/go-homedir" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*AgentGenerateConfigCommand)(nil) - _ cli.CommandAutocomplete = (*AgentGenerateConfigCommand)(nil) -) - -type AgentGenerateConfigCommand struct { - *BaseCommand - - flagType string - flagPaths []string - flagExec string -} - -func (c *AgentGenerateConfigCommand) Synopsis() string { - return "Generate a Vault Agent configuration file." -} - -func (c *AgentGenerateConfigCommand) Help() string { - helpText := ` -Usage: vault agent generate-config [options] [path/to/config.hcl] - - Generates a simple Vault Agent configuration file from the given parameters. - - Currently, the only supported configuration type is 'env-template', which - helps you generate a configuration file with environment variable templates - for running Vault Agent in process supervisor mode. - - For every specified secret -path, the command will attempt to generate one or - multiple 'env_template' entries based on the JSON key(s) stored in the - specified secret. If the secret -path ends with '/*', the command will - attempt to recurse through the secrets tree rooted at the given path, - generating 'env_template' entries for each encountered secret. Currently, - only kv-v1 and kv-v2 paths are supported. 
- - The command specified in the '-exec' option will be used to generate an - 'exec' entry, which will tell Vault Agent which child process to run. - - In addition to env_template entries, the command generates an 'auto_auth' - section with 'token_file' authentication method. While this method is very - convenient for local testing, it should NOT be used in production. Please - see https://developer.hashicorp.com/vault/docs/agentandproxy/autoauth for - a list of production-ready auto_auth methods that you can use instead. - - By default, the file will be generated in the local directory as 'agent.hcl' - unless a path is specified as an argument. - - Generate a simple environment variable template configuration: - - $ vault agent generate-config -type="env-template" \ - -exec="./my-app arg1 arg2" \ - -path="secret/foo" - - Generate an environment variable template configuration for multiple secrets: - - $ vault agent generate-config -type="env-template" \ - -exec="./my-app arg1 arg2" \ - -path="secret/foo" \ - -path="secret/bar" \ - -path="secret/my-app/*" - -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *AgentGenerateConfigCommand) Flags() *FlagSets { - set := NewFlagSets(c.UI) - - // Common Options - f := set.NewFlagSet("Command Options") - - f.StringVar(&StringVar{ - Name: "type", - Target: &c.flagType, - Usage: "Type of configuration file to generate; currently, only 'env-template' is supported.", - Completion: complete.PredictSet( - "env-template", - ), - }) - - f.StringSliceVar(&StringSliceVar{ - Name: "path", - Target: &c.flagPaths, - Usage: "Path to a kv-v1 or kv-v2 secret (e.g. 
secret/data/foo, kv-v2/prefix/*); multiple secrets and tail '*' wildcards are allowed.", - Completion: c.PredictVaultFolders(), - }) - - f.StringVar(&StringVar{ - Name: "exec", - Target: &c.flagExec, - Default: "env", - Usage: "The command to execute in agent process supervisor mode.", - }) - - return set -} - -func (c *AgentGenerateConfigCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictNothing -} - -func (c *AgentGenerateConfigCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *AgentGenerateConfigCommand) Run(args []string) int { - flags := c.Flags() - - if err := flags.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = flags.Args() - - if len(args) > 1 { - c.UI.Error(fmt.Sprintf("Too many arguments (expected at most 1, got %d)", len(args))) - return 1 - } - - if c.flagType == "" { - c.UI.Error(`Please specify a -type flag; currently only -type="env-template" is supported.`) - return 1 - } - - if c.flagType != "env-template" { - c.UI.Error(fmt.Sprintf(`%q is not a supported configuration type; currently only -type="env-template" is supported.`, c.flagType)) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - config, err := generateConfiguration(context.Background(), client, c.flagExec, c.flagPaths) - if err != nil { - c.UI.Error(fmt.Sprintf("Error: %v", err)) - return 2 - } - - var configPath string - if len(args) == 1 { - configPath = args[0] - } else { - configPath = "agent.hcl" - } - - f, err := os.Create(configPath) - if err != nil { - c.UI.Error(fmt.Sprintf("Could not create configuration file %q: %v", configPath, err)) - return 3 - } - defer func() { - if err := f.Close(); err != nil { - c.UI.Error(fmt.Sprintf("Could not close configuration file %q: %v", configPath, err)) - } - }() - - if _, err := config.WriteTo(f); err != nil { - c.UI.Error(fmt.Sprintf("Could not write to configuration file %q: %v", 
configPath, err)) - return 3 - } - - c.UI.Info(fmt.Sprintf("Successfully generated %q configuration file!", configPath)) - - c.UI.Warn("Warning: the generated file uses 'token_file' authentication method, which is not suitable for production environments.") - - return 0 -} - -func generateConfiguration(ctx context.Context, client *api.Client, flagExec string, flagPaths []string) (io.WriterTo, error) { - var execCommand []string - if flagExec != "" { - execCommand = strings.Split(flagExec, " ") - } else { - execCommand = []string{"env"} - } - - tokenPath, err := homedir.Expand("~/.vault-token") - if err != nil { - return nil, fmt.Errorf("could not expand home directory: %w", err) - } - - templates, err := constructTemplates(ctx, client, flagPaths) - if err != nil { - return nil, fmt.Errorf("could not generate templates: %w", err) - } - - config := generatedConfig{ - AutoAuth: generatedConfigAutoAuth{ - Method: generatedConfigAutoAuthMethod{ - Type: "token_file", - Config: generatedConfigAutoAuthMethodConfig{ - TokenFilePath: tokenPath, - }, - }, - }, - TemplateConfig: generatedConfigTemplateConfig{ - StaticSecretRenderInterval: "5m", - ExitOnRetryFailure: true, - }, - Vault: generatedConfigVault{ - Address: client.Address(), - }, - Exec: generatedConfigExec{ - Command: execCommand, - RestartOnSecretChanges: "always", - RestartStopSignal: "SIGTERM", - }, - EnvTemplates: templates, - } - - contents := hclwrite.NewEmptyFile() - - gohcl.EncodeIntoBody(&config, contents.Body()) - - return contents, nil -} - -func constructTemplates(ctx context.Context, client *api.Client, paths []string) ([]generatedConfigEnvTemplate, error) { - var templates []generatedConfigEnvTemplate - - for _, path := range paths { - path = sanitizePath(path) - - mountPath, v2, err := isKVv2(path, client) - if err != nil { - return nil, fmt.Errorf("could not validate secret path %q: %w", path, err) - } - - switch { - case strings.HasSuffix(path, "/*"): - // this path contains a tail wildcard, 
attempt to walk the tree - t, err := constructTemplatesFromTree(ctx, client, path[:len(path)-2], mountPath, v2) - if err != nil { - return nil, fmt.Errorf("could not traverse sercet at %q: %w", path, err) - } - templates = append(templates, t...) - - case strings.Contains(path, "*"): - // don't allow any other wildcards - return nil, fmt.Errorf("the path %q cannot contain '*' wildcard characters except as the last element of the path", path) - - default: - // regular secret path - t, err := constructTemplatesFromSecret(ctx, client, path, mountPath, v2) - if err != nil { - return nil, fmt.Errorf("could not read secret at %q: %v", path, err) - } - templates = append(templates, t...) - } - } - - return templates, nil -} - -func constructTemplatesFromTree(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { - var templates []generatedConfigEnvTemplate - - if v2 { - metadataPath := strings.Replace( - path, - paths.Join(mountPath, "data"), - paths.Join(mountPath, "metadata"), - 1, - ) - if path != metadataPath { - path = metadataPath - } else { - path = addPrefixToKVPath(path, mountPath, "metadata", true) - } - } - - err := walkSecretsTree(ctx, client, path, func(child string, directory bool) error { - if directory { - return nil - } - - dataPath := strings.Replace( - child, - paths.Join(mountPath, "metadata"), - paths.Join(mountPath, "data"), - 1, - ) - - t, err := constructTemplatesFromSecret(ctx, client, dataPath, mountPath, v2) - if err != nil { - return err - } - templates = append(templates, t...) 
- - return nil - }) - if err != nil { - return nil, err - } - - return templates, nil -} - -func constructTemplatesFromSecret(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { - var templates []generatedConfigEnvTemplate - - if v2 { - path = addPrefixToKVPath(path, mountPath, "data", true) - } - - resp, err := client.Logical().ReadWithContext(ctx, path) - if err != nil { - return nil, fmt.Errorf("error querying: %w", err) - } - if resp == nil { - return nil, fmt.Errorf("secret not found") - } - - var data map[string]interface{} - if v2 { - internal, ok := resp.Data["data"] - if !ok { - return nil, fmt.Errorf("secret.Data not found") - } - data = internal.(map[string]interface{}) - } else { - data = resp.Data - } - - fields := make([]string, 0, len(data)) - - for field := range data { - fields = append(fields, field) - } - - // sort for a deterministic output - sort.Strings(fields) - - var dataContents string - if v2 { - dataContents = ".Data.data" - } else { - dataContents = ".Data" - } - - for _, field := range fields { - templates = append(templates, generatedConfigEnvTemplate{ - Name: constructDefaultEnvironmentKey(path, field), - Contents: fmt.Sprintf(`{{ with secret "%s" }}{{ %s.%s }}{{ end }}`, path, dataContents, field), - ErrorOnMissingKey: true, - }) - } - - return templates, nil -} - -func constructDefaultEnvironmentKey(path string, field string) string { - pathParts := strings.Split(path, "/") - pathPartsLast := pathParts[len(pathParts)-1] - - notLetterOrNumber := func(r rune) bool { - return !unicode.IsLetter(r) && !unicode.IsNumber(r) - } - - p1 := strings.FieldsFunc(pathPartsLast, notLetterOrNumber) - p2 := strings.FieldsFunc(field, notLetterOrNumber) - - keyParts := append(p1, p2...) - - return strings.ToUpper(strings.Join(keyParts, "_")) -} - -// Below, we are redefining a subset of the configuration-related structures -// defined under command/agent/config. 
Using these structures we can tailor the -// output of the generated config, while using the original structures would -// have produced an HCL document with many empty fields. The structures below -// should not be used for anything other than generation. - -type generatedConfig struct { - AutoAuth generatedConfigAutoAuth `hcl:"auto_auth,block"` - TemplateConfig generatedConfigTemplateConfig `hcl:"template_config,block"` - Vault generatedConfigVault `hcl:"vault,block"` - EnvTemplates []generatedConfigEnvTemplate `hcl:"env_template,block"` - Exec generatedConfigExec `hcl:"exec,block"` -} - -type generatedConfigTemplateConfig struct { - StaticSecretRenderInterval string `hcl:"static_secret_render_interval"` - ExitOnRetryFailure bool `hcl:"exit_on_retry_failure"` -} - -type generatedConfigExec struct { - Command []string `hcl:"command"` - RestartOnSecretChanges string `hcl:"restart_on_secret_changes"` - RestartStopSignal string `hcl:"restart_stop_signal"` -} - -type generatedConfigEnvTemplate struct { - Name string `hcl:"name,label"` - Contents string `hcl:"contents,attr"` - ErrorOnMissingKey bool `hcl:"error_on_missing_key"` -} - -type generatedConfigVault struct { - Address string `hcl:"address"` -} - -type generatedConfigAutoAuth struct { - Method generatedConfigAutoAuthMethod `hcl:"method,block"` -} - -type generatedConfigAutoAuthMethod struct { - Type string `hcl:"type"` - Config generatedConfigAutoAuthMethodConfig `hcl:"config,block"` -} - -type generatedConfigAutoAuthMethodConfig struct { - TokenFilePath string `hcl:"token_file_path"` -} diff --git a/command/agent_generate_config_test.go b/command/agent_generate_config_test.go deleted file mode 100644 index f225a7c9e8cb4..0000000000000 --- a/command/agent_generate_config_test.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "bytes" - "context" - "reflect" - "regexp" - "testing" - "time" -) - -// TestConstructTemplates tests the construcTemplates helper function -func TestConstructTemplates(t *testing.T) { - ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) - defer cancelContextFunc() - - client, closer := testVaultServerWithSecrets(ctx, t) - defer closer() - - cases := map[string]struct { - paths []string - expected []generatedConfigEnvTemplate - expectedError bool - }{ - "kv-v1-simple": { - paths: []string{"kv-v1/foo"}, - expected: []generatedConfigEnvTemplate{ - {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, - {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, - }, - expectedError: false, - }, - - "kv-v2-simple": { - paths: []string{"kv-v2/foo"}, - expected: []generatedConfigEnvTemplate{ - {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, - {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, - }, - expectedError: false, - }, - - "kv-v2-data-in-path": { - paths: []string{"kv-v2/data/foo"}, - expected: []generatedConfigEnvTemplate{ - {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, - {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, - }, - expectedError: false, - }, - - "kv-v1-nested": { - paths: []string{"kv-v1/app-1/*"}, - expected: []generatedConfigEnvTemplate{ - {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, - {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ 
.Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, - {Contents: `{{ with secret "kv-v1/app-1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, - {Contents: `{{ with secret "kv-v1/app-1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, - {Contents: `{{ with secret "kv-v1/app-1/nested/baz" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_PASSWORD"}, - {Contents: `{{ with secret "kv-v1/app-1/nested/baz" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_USER"}, - }, - expectedError: false, - }, - - "kv-v2-nested": { - paths: []string{"kv-v2/app-1/*"}, - expected: []generatedConfigEnvTemplate{ - {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, - {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, - {Contents: `{{ with secret "kv-v2/data/app-1/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, - {Contents: `{{ with secret "kv-v2/data/app-1/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, - {Contents: `{{ with secret "kv-v2/data/app-1/nested/baz" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_PASSWORD"}, - {Contents: `{{ with secret "kv-v2/data/app-1/nested/baz" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_USER"}, - }, - expectedError: false, - }, - - "kv-v1-multi-path": { - paths: []string{"kv-v1/foo", "kv-v1/app-1/bar"}, - expected: []generatedConfigEnvTemplate{ - {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, - {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, - {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.password 
}}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, - {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, - }, - expectedError: false, - }, - - "kv-v2-multi-path": { - paths: []string{"kv-v2/foo", "kv-v2/app-1/bar"}, - expected: []generatedConfigEnvTemplate{ - {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, - {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, - {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, - {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, - }, - expectedError: false, - }, - - "kv-v1-path-not-found": { - paths: []string{"kv-v1/does/not/exist"}, - expected: nil, - expectedError: true, - }, - - "kv-v2-path-not-found": { - paths: []string{"kv-v2/does/not/exist"}, - expected: nil, - expectedError: true, - }, - - "kv-v1-early-wildcard": { - paths: []string{"kv-v1/*/foo"}, - expected: nil, - expectedError: true, - }, - - "kv-v2-early-wildcard": { - paths: []string{"kv-v2/*/foo"}, - expected: nil, - expectedError: true, - }, - } - - for name, tc := range cases { - name, tc := name, tc - - t.Run(name, func(t *testing.T) { - templates, err := constructTemplates(ctx, client, tc.paths) - - if tc.expectedError { - if err == nil { - t.Fatal("an error was expected but the test succeeded") - } - } else { - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(tc.expected, templates) { - t.Fatalf("unexpected output; want: %v, got: %v", tc.expected, templates) - } - } - }) - } -} - -// TestGenerateConfiguration tests the generateConfiguration helper function -func TestGenerateConfiguration(t *testing.T) { - ctx, cancelContextFunc := 
context.WithTimeout(context.Background(), 5*time.Second) - defer cancelContextFunc() - - client, closer := testVaultServerWithSecrets(ctx, t) - defer closer() - - cases := map[string]struct { - flagExec string - flagPaths []string - expected *regexp.Regexp - expectedError bool - }{ - "kv-v1-simple": { - flagExec: "./my-app arg1 arg2", - flagPaths: []string{"kv-v1/foo"}, - expected: regexp.MustCompile(` -auto_auth \{ - - method \{ - type = "token_file" - - config \{ - token_file_path = ".*/.vault-token" - } - } -} - -template_config \{ - static_secret_render_interval = "5m" - exit_on_retry_failure = true -} - -vault \{ - address = "https://127.0.0.1:[0-9]{5}" -} - -env_template "FOO_PASSWORD" \{ - contents = "\{\{ with secret \\"kv-v1/foo\\" }}\{\{ .Data.password }}\{\{ end }}" - error_on_missing_key = true -} -env_template "FOO_USER" \{ - contents = "\{\{ with secret \\"kv-v1/foo\\" }}\{\{ .Data.user }}\{\{ end }}" - error_on_missing_key = true -} - -exec \{ - command = \["./my-app", "arg1", "arg2"\] - restart_on_secret_changes = "always" - restart_stop_signal = "SIGTERM" -} -`), - expectedError: false, - }, - - "kv-v2-default-exec": { - flagExec: "", - flagPaths: []string{"kv-v2/foo"}, - expected: regexp.MustCompile(` -auto_auth \{ - - method \{ - type = "token_file" - - config \{ - token_file_path = ".*/.vault-token" - } - } -} - -template_config \{ - static_secret_render_interval = "5m" - exit_on_retry_failure = true -} - -vault \{ - address = "https://127.0.0.1:[0-9]{5}" -} - -env_template "FOO_PASSWORD" \{ - contents = "\{\{ with secret \\"kv-v2/data/foo\\" }}\{\{ .Data.data.password }}\{\{ end }}" - error_on_missing_key = true -} -env_template "FOO_USER" \{ - contents = "\{\{ with secret \\"kv-v2/data/foo\\" }}\{\{ .Data.data.user }}\{\{ end }}" - error_on_missing_key = true -} - -exec \{ - command = \["env"\] - restart_on_secret_changes = "always" - restart_stop_signal = "SIGTERM" -} -`), - expectedError: false, - }, - } - - for name, tc := range cases { - 
name, tc := name, tc - - t.Run(name, func(t *testing.T) { - var config bytes.Buffer - - c, err := generateConfiguration(ctx, client, tc.flagExec, tc.flagPaths) - c.WriteTo(&config) - - if tc.expectedError { - if err == nil { - t.Fatal("an error was expected but the test succeeded") - } - } else { - if err != nil { - t.Fatal(err) - } - - if !tc.expected.MatchString(config.String()) { - t.Fatalf("unexpected output; want: %v, got: %v", tc.expected.String(), config.String()) - } - } - }) - } -} diff --git a/command/agent_test.go b/command/agent_test.go index 406bdf6ac0ab3..c5138144ef9c0 100644 --- a/command/agent_test.go +++ b/command/agent_test.go @@ -1,14 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( - "crypto/tls" - "crypto/x509" "encoding/json" "fmt" - "io" + "io/ioutil" "net" "net/http" "os" @@ -19,15 +14,12 @@ import ( "testing" "time" - "github.com/hashicorp/go-hclog" + hclog "github.com/hashicorp/go-hclog" vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" "github.com/hashicorp/vault/command/agent" - agentConfig "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/helper/testhelpers/minimal" - "github.com/hashicorp/vault/helper/useragent" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" @@ -35,49 +27,9 @@ import ( "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -const ( - BasicHclConfig = ` -log_file = "TMPDIR/juan.log" -log_level="warn" -log_rotate_max_files=2 -log_rotate_bytes=1048576 -vault { - address = "http://127.0.0.1:8200" - retry { - num_retries = 5 - } -} - -listener "tcp" { - address = 
"127.0.0.1:8100" - tls_disable = false - tls_cert_file = "TMPDIR/reload_cert.pem" - tls_key_file = "TMPDIR/reload_key.pem" -}` - BasicHclConfig2 = ` -log_file = "TMPDIR/juan.log" -log_level="debug" -log_rotate_max_files=-1 -log_rotate_bytes=1048576 -vault { - address = "http://127.0.0.1:8200" - retry { - num_retries = 5 - } -} - -listener "tcp" { - address = "127.0.0.1:8100" - tls_disable = false - tls_cert_file = "TMPDIR/reload_cert.pem" - tls_key_file = "TMPDIR/reload_key.pem" -}` -) - func testAgentCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *AgentCommand) { tb.Helper() @@ -87,13 +39,195 @@ func testAgentCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *AgentCo UI: ui, }, ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), logger: logger, - startedCh: make(chan struct{}, 5), - reloadedCh: make(chan struct{}, 5), } } +/* +func TestAgent_Cache_UnixListener(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger.Named("core"), + CredentialBackends: map[string]logical.Factory{ + "jwt": vaultjwt.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, client.Address()) + + defer os.Setenv(api.EnvVaultCACert, os.Getenv(api.EnvVaultCACert)) + os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir)) + + // Setup Vault + err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ + Type: "jwt", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ + "bound_issuer": "https://team-vault.auth0.com/", + "jwt_validation_pubkeys": agent.TestECDSAPubKey, + }) + if err != nil { + 
t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ + "role_type": "jwt", + "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + "bound_audiences": "https://vault.plugin.auth.jwt.test", + "user_claim": "https://vault/user", + "groups_claim": "https://vault/groups", + "policies": "test", + "period": "3s", + }) + if err != nil { + t.Fatal(err) + } + + inf, err := ioutil.TempFile("", "auth.jwt.test.") + if err != nil { + t.Fatal(err) + } + in := inf.Name() + inf.Close() + os.Remove(in) + t.Logf("input: %s", in) + + sink1f, err := ioutil.TempFile("", "sink1.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink1 := sink1f.Name() + sink1f.Close() + os.Remove(sink1) + t.Logf("sink1: %s", sink1) + + sink2f, err := ioutil.TempFile("", "sink2.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink2 := sink2f.Name() + sink2f.Close() + os.Remove(sink2) + t.Logf("sink2: %s", sink2) + + conff, err := ioutil.TempFile("", "conf.jwt.test.") + if err != nil { + t.Fatal(err) + } + conf := conff.Name() + conff.Close() + os.Remove(conf) + t.Logf("config: %s", conf) + + jwtToken, _ := agent.GetTestJWT(t) + if err := ioutil.WriteFile(in, []byte(jwtToken), 0600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test jwt", "path", in) + } + + socketff, err := ioutil.TempFile("", "cache.socket.") + if err != nil { + t.Fatal(err) + } + socketf := socketff.Name() + socketff.Close() + os.Remove(socketf) + t.Logf("socketf: %s", socketf) + + config := ` +auto_auth { + method { + type = "jwt" + config = { + role = "test" + path = "%s" + } + } + + sink { + type = "file" + config = { + path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +} + +cache { + use_auto_auth_token = true + + listener "unix" { + address = "%s" + tls_disable = true + } +} +` + + config = fmt.Sprintf(config, in, sink1, sink2, socketf) + if err := ioutil.WriteFile(conf, []byte(config), 0600); err != nil { + t.Fatal(err) + } else { + 
logger.Trace("wrote test config", "path", conf) + } + + _, cmd := testAgentCommand(t, logger) + cmd.client = client + + // Kill the command 5 seconds after it starts + go func() { + select { + case <-cmd.ShutdownCh: + case <-time.After(5 * time.Second): + cmd.ShutdownCh <- struct{}{} + } + }() + + originalVaultAgentAddress := os.Getenv(api.EnvVaultAgentAddr) + + // Create a client that talks to the agent + os.Setenv(api.EnvVaultAgentAddr, socketf) + testClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + os.Setenv(api.EnvVaultAgentAddr, originalVaultAgentAddress) + + // Start the agent + go cmd.Run([]string{"-config", conf}) + + // Give some time for the auto-auth to complete + time.Sleep(1 * time.Second) + + // Invoke lookup self through the agent + secret, err := testClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil || secret.Data["id"].(string) == "" { + t.Fatalf("failed to perform lookup self through agent") + } +} +*/ + func TestAgent_ExitAfterAuth(t *testing.T) { t.Run("via_config", func(t *testing.T) { testAgentExitAfterAuth(t, false) @@ -151,7 +285,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { t.Fatal(err) } - inf, err := os.CreateTemp("", "auth.jwt.test.") + inf, err := ioutil.TempFile("", "auth.jwt.test.") if err != nil { t.Fatal(err) } @@ -160,7 +294,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { os.Remove(in) t.Logf("input: %s", in) - sink1f, err := os.CreateTemp("", "sink1.jwt.test.") + sink1f, err := ioutil.TempFile("", "sink1.jwt.test.") if err != nil { t.Fatal(err) } @@ -169,7 +303,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { os.Remove(sink1) t.Logf("sink1: %s", sink1) - sink2f, err := os.CreateTemp("", "sink2.jwt.test.") + sink2f, err := ioutil.TempFile("", "sink2.jwt.test.") if err != nil { t.Fatal(err) } @@ -178,7 +312,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { os.Remove(sink2) 
t.Logf("sink2: %s", sink2) - conff, err := os.CreateTemp("", "conf.jwt.test.") + conff, err := ioutil.TempFile("", "conf.jwt.test.") if err != nil { t.Fatal(err) } @@ -188,7 +322,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { t.Logf("config: %s", conf) jwtToken, _ := agent.GetTestJWT(t) - if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + if err := ioutil.WriteFile(in, []byte(jwtToken), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test jwt", "path", in) @@ -227,7 +361,7 @@ auto_auth { ` config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) - if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { + if err := ioutil.WriteFile(conf, []byte(config), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test config", "path", conf) @@ -259,7 +393,7 @@ auto_auth { t.Fatal("timeout reached while waiting for agent to exit") } - sink1Bytes, err := os.ReadFile(sink1) + sink1Bytes, err := ioutil.ReadFile(sink1) if err != nil { t.Fatal(err) } @@ -267,7 +401,7 @@ auto_auth { t.Fatal("got no output from sink 1") } - sink2Bytes, err := os.ReadFile(sink2) + sink2Bytes, err := ioutil.ReadFile(sink2) if err != nil { t.Fatal(err) } @@ -516,16 +650,12 @@ listener "tcp" { } } -// TestAgent_Template_UserAgent Validates that the User-Agent sent to Vault -// as part of Templating requests is correct. Uses the custom handler -// userAgentHandler struct defined in this test package, so that Vault validates the -// User-Agent on requests sent by Agent. 
-func TestAgent_Template_UserAgent(t *testing.T) { +// TestAgent_Template tests rendering templates +func TestAgent_Template_Basic(t *testing.T) { //---------------------------------------------------- // Start the server and agent //---------------------------------------------------- logger := logging.NewVaultLogger(hclog.Trace) - var h userAgentHandler cluster := vault.NewTestCluster(t, &vault.CoreConfig{ Logger: logger, @@ -537,16 +667,7 @@ func TestAgent_Template_UserAgent(t *testing.T) { }, }, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.AgentTemplatingString() - h.pathToCheck = "/v1/secret/data" - h.requestMethodToCheck = "GET" - h.t = t - return &h - }), + HandlerFunc: vaulthttp.Handler, }) cluster.Start() defer cluster.Cleanup() @@ -630,35 +751,61 @@ func TestAgent_Template_UserAgent(t *testing.T) { // make a temp directory to hold renders. 
Each test will create a temp dir // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + tmpDirRoot, err := ioutil.TempDir("", "agent-test-renders") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDirRoot) - // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, "TestAgent_Template_UserAgent") - if err != nil { - t.Fatal(err) - } - // make some template files - var templatePaths []string - fileName := filepath.Join(tmpDir, "render_0.tmpl") - if err := os.WriteFile(fileName, []byte(templateContents(0)), 0o600); err != nil { - t.Fatal(err) + // start test cases here + testCases := map[string]struct { + templateCount int + exitAfterAuth bool + }{ + "one": { + templateCount: 1, + }, + "one_with_exit": { + templateCount: 1, + exitAfterAuth: true, + }, + "many": { + templateCount: 15, + }, + "many_with_exit": { + templateCount: 13, + exitAfterAuth: true, + }, } - templatePaths = append(templatePaths, fileName) - // build up the template config to be added to the Agent config.hcl file - var templateConfigStrings []string - for i, t := range templatePaths { - index := fmt.Sprintf("render_%d.json", i) - s := fmt.Sprintf(templateConfigString, t, tmpDir, index) - templateConfigStrings = append(templateConfigStrings, s) - } + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + // create temp dir for this test run + tmpDir, err := ioutil.TempDir(tmpDirRoot, tcname) + if err != nil { + t.Fatal(err) + } - // Create a config file - config := ` + // make some template files + var templatePaths []string + for i := 0; i < tc.templateCount; i++ { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) + if err := ioutil.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { + t.Fatal(err) + } + templatePaths = append(templatePaths, fileName) + } + + // build up the template config to be added to the Agent config.hcl file + var templateConfigStrings []string + for i, t := range 
templatePaths { + index := fmt.Sprintf("render_%d.json", i) + s := fmt.Sprintf(templateConfigString, t, tmpDir, index) + templateConfigStrings = append(templateConfigStrings, s) + } + + // Create a config file + config := ` vault { address = "%s" tls_skip_verify = true @@ -676,292 +823,27 @@ auto_auth { } %s -` - // flatten the template configs - templateConfig := strings.Join(templateConfigStrings, " ") +%s +` - config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) + // conditionally set the exit_after_auth flag + exitAfterAuth := "" + if tc.exitAfterAuth { + exitAfterAuth = "exit_after_auth = true" + } - // Start the agent - ui, cmd := testAgentCommand(t, logger) - cmd.client = serverClient - cmd.startedCh = make(chan struct{}) + // flatten the template configs + templateConfig := strings.Join(templateConfigStrings, " ") - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - code := cmd.Run([]string{"-config", configPath}) - if code != 0 { - t.Errorf("non-zero return code when running agent: %d", code) - t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) - t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) - } - wg.Done() - }() + config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig, exitAfterAuth) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - // We need to shut down the Agent command - defer func() { - cmd.ShutdownCh <- struct{}{} - wg.Wait() - }() - - verify := func(suffix string) { - t.Helper() - // We need to poll for a bit to give Agent time to render the - // templates. 
Without this, the test will attempt to read - // the temp dir before Agent has had time to render and will - // likely fail the test - tick := time.Tick(1 * time.Second) - timeout := time.After(10 * time.Second) - var err error - for { - select { - case <-timeout: - t.Fatalf("timed out waiting for templates to render, last error: %v", err) - case <-tick: - } - // Check for files rendered in the directory and break - // early for shutdown if we do have all the files - // rendered - - //---------------------------------------------------- - // Perform the tests - //---------------------------------------------------- - - if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != len(templatePaths) { - err = fmt.Errorf("expected (%d) templates, got (%d)", len(templatePaths), numFiles) - continue - } - - for i := range templatePaths { - fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) - var c []byte - c, err = os.ReadFile(fileName) - if err != nil { - continue - } - if string(c) != templateRendered(i)+suffix { - err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) - continue - } - } - return - } - } - - verify("") - - fileName = filepath.Join(tmpDir, "render_0.tmpl") - if err := os.WriteFile(fileName, []byte(templateContents(0)+"{}"), 0o600); err != nil { - t.Fatal(err) - } - - verify("{}") -} - -// TestAgent_Template tests rendering templates -func TestAgent_Template_Basic(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer 
cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Setenv(api.EnvVaultAddress, serverClient.Address()) - - // Enable the approle auth method - req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") - req.BodyBytes = []byte(`{ - "type": "approle" - }`) - request(t, serverClient, req, 204) - - // give test-role permissions to read the kv secret - req = serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") - req.BodyBytes = []byte(`{ - "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" - }`) - request(t, serverClient, req, 204) - - // Create a named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") - req.BodyBytes = []byte(`{ - "token_ttl": "5m", - "token_policies":"default,myapp-read", - "policies":"default,myapp-read" - }`) - request(t, serverClient, req, 204) - - // Fetch the RoleID of the named role - req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") - body := request(t, serverClient, req, 200) - data := body["data"].(map[string]interface{}) - roleID := data["role_id"].(string) - - // Get a SecretID issued against the named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") - body = request(t, serverClient, req, 200) - data = body["data"].(map[string]interface{}) - secretID := data["secret_id"].(string) - - // Write the RoleID and SecretID to temp files - roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") - secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - defer os.Remove(roleIDPath) - defer os.Remove(secretIDPath) - - // setup the kv secrets - req = serverClient.NewRequest("POST", "/v1/sys/mounts/secret/tune") - req.BodyBytes = []byte(`{ - "options": {"version": "2"} - }`) - 
request(t, serverClient, req, 200) - - // populate a secret - req = serverClient.NewRequest("POST", "/v1/secret/data/myapp") - req.BodyBytes = []byte(`{ - "data": { - "username": "bar", - "password": "zap" - } - }`) - request(t, serverClient, req, 200) - - // populate another secret - req = serverClient.NewRequest("POST", "/v1/secret/data/otherapp") - req.BodyBytes = []byte(`{ - "data": { - "username": "barstuff", - "password": "zap", - "cert": "something" - } - }`) - request(t, serverClient, req, 200) - - // make a temp directory to hold renders. Each test will create a temp dir - // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDirRoot) - - // start test cases here - testCases := map[string]struct { - templateCount int - exitAfterAuth bool - }{ - "one": { - templateCount: 1, - }, - "one_with_exit": { - templateCount: 1, - exitAfterAuth: true, - }, - "many": { - templateCount: 15, - }, - "many_with_exit": { - templateCount: 13, - exitAfterAuth: true, - }, - } - - for tcname, tc := range testCases { - t.Run(tcname, func(t *testing.T) { - // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) - if err != nil { - t.Fatal(err) - } - - // make some template files - var templatePaths []string - for i := 0; i < tc.templateCount; i++ { - fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) - if err := os.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { - t.Fatal(err) - } - templatePaths = append(templatePaths, fileName) - } - - // build up the template config to be added to the Agent config.hcl file - var templateConfigStrings []string - for i, t := range templatePaths { - index := fmt.Sprintf("render_%d.json", i) - s := fmt.Sprintf(templateConfigString, t, tmpDir, index) - templateConfigStrings = append(templateConfigStrings, s) - } - - // Create a config file - config := ` -vault { - address = "%s" - 
tls_skip_verify = true -} - -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false - } - } -} - -%s - -%s -` - - // conditionally set the exit_after_auth flag - exitAfterAuth := "" - if tc.exitAfterAuth { - exitAfterAuth = "exit_after_auth = true" - } - - // flatten the template configs - templateConfig := strings.Join(templateConfigStrings, " ") - - config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig, exitAfterAuth) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - ui, cmd := testAgentCommand(t, logger) - cmd.client = serverClient - cmd.startedCh = make(chan struct{}) + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) wg := &sync.WaitGroup{} wg.Add(1) @@ -1022,7 +904,7 @@ auto_auth { for i := range templatePaths { fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) var c []byte - c, err = os.ReadFile(fileName) + c, err = ioutil.ReadFile(fileName) if err != nil { continue } @@ -1039,7 +921,7 @@ auto_auth { for i := 0; i < tc.templateCount; i++ { fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) - if err := os.WriteFile(fileName, []byte(templateContents(i)+"{}"), 0o600); err != nil { + if err := ioutil.WriteFile(fileName, []byte(templateContents(i)+"{}"), 0o600); err != nil { t.Fatal(err) } } @@ -1052,7 +934,7 @@ auto_auth { func testListFiles(t *testing.T, dir, extension string) int { t.Helper() - files, err := os.ReadDir(dir) + files, err := ioutil.ReadDir(dir) if err != nil { t.Fatal(err) } @@ -1182,14 +1064,14 @@ func TestAgent_Template_ExitCounter(t *testing.T) { // make a temp directory to hold renders. 
Each test will create a temp dir // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + tmpDirRoot, err := ioutil.TempDir("", "agent-test-renders") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDirRoot) // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, "agent-test") + tmpDir, err := ioutil.TempDir(tmpDirRoot, "agent-test") if err != nil { t.Fatal(err) } @@ -1271,7 +1153,7 @@ exit_after_auth = true // Perform the tests //---------------------------------------------------- - files, err := os.ReadDir(tmpDir) + files, err := ioutil.ReadDir(tmpDir) if err != nil { t.Fatal(err) } @@ -1334,7 +1216,7 @@ func request(t *testing.T, client *api.Client, req *api.Request, expectedStatusC t.Fatalf("expected status code %d, not %d", expectedStatusCode, resp.StatusCode) } - bytes, err := io.ReadAll(resp.Body) + bytes, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("err: %s", err) } @@ -1353,7 +1235,7 @@ func request(t *testing.T, client *api.Client, req *api.Request, expectedStatusC // makeTempFile creates a temp file and populates it. func makeTempFile(t *testing.T, name, contents string) string { t.Helper() - f, err := os.CreateTemp("", name) + f, err := ioutil.TempFile("", name) if err != nil { t.Fatal(err) } @@ -1363,27 +1245,6 @@ func makeTempFile(t *testing.T, name, contents string) string { return path } -func populateTempFile(t *testing.T, name, contents string) *os.File { - t.Helper() - - file, err := os.CreateTemp(t.TempDir(), name) - if err != nil { - t.Fatal(err) - } - - _, err = file.WriteString(contents) - if err != nil { - t.Fatal(err) - } - - err = file.Close() - if err != nil { - t.Fatal(err) - } - - return file -} - // handler makes 500 errors happen for reads on /v1/secret. // Definitely not thread-safe, do not use t.Parallel with this. 
type handler struct { @@ -1402,28 +1263,7 @@ func (h *handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { } h.t.Logf("passing GET request on %s", req.URL.Path) } - vaulthttp.Handler.Handler(h.props).ServeHTTP(resp, req) -} - -// userAgentHandler makes it easy to test the User-Agent header received -// by Vault -type userAgentHandler struct { - props *vault.HandlerProperties - failCount int - userAgentToCheckFor string - pathToCheck string - requestMethodToCheck string - t *testing.T -} - -func (h *userAgentHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if req.Method == h.requestMethodToCheck && strings.Contains(req.RequestURI, h.pathToCheck) { - userAgent := req.UserAgent() - if !(userAgent == h.userAgentToCheckFor) { - h.t.Fatalf("User-Agent string not as expected. Expected to find %s, got %s", h.userAgentToCheckFor, userAgent) - } - } - vaulthttp.Handler.Handler(h.props).ServeHTTP(w, req) + vaulthttp.Handler(h.props).ServeHTTP(resp, req) } // TestAgent_Template_Retry verifies that the template server retries requests @@ -1446,12 +1286,11 @@ func TestAgent_Template_Retry(t *testing.T) { }, &vault.TestClusterOptions{ NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.t = t - return &h - }), + HandlerFunc: func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.t = t + return &h + }, }) cluster.Start() defer cluster.Cleanup() @@ -1489,7 +1328,7 @@ func TestAgent_Template_Retry(t *testing.T) { // make a temp directory to hold renders. 
Each test will create a temp dir // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + tmpDirRoot, err := ioutil.TempDir("", "agent-test-renders") if err != nil { t.Fatal(err) } @@ -1537,14 +1376,14 @@ func TestAgent_Template_Retry(t *testing.T) { h.failCount = 6 // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) + tmpDir, err := ioutil.TempDir(tmpDirRoot, tcname) if err != nil { t.Fatal(err) } // make some template files templatePath := filepath.Join(tmpDir, "render_0.tmpl") - if err := os.WriteFile(templatePath, []byte(templateContents(0)), 0o600); err != nil { + if err := ioutil.WriteFile(templatePath, []byte(templateContents(0)), 0o600); err != nil { t.Fatal(err) } templateConfig := fmt.Sprintf(templateConfigString, templatePath, tmpDir, "render_0.json") @@ -1618,7 +1457,7 @@ template_config { fileName := filepath.Join(tmpDir, "render_0.json") var c []byte - c, err = os.ReadFile(fileName) + c, err = ioutil.ReadFile(fileName) if err != nil { continue } @@ -1627,540 +1466,96 @@ template_config { continue } return nil - } - } - - err = verify() - close(cmd.ShutdownCh) - wg.Wait() - - switch { - case (code != 0 || err != nil) && tc.expectError: - case code == 0 && err == nil && !tc.expectError: - default: - t.Fatalf("%s expectError=%v error=%v code=%d", tcname, tc.expectError, err, code) - } - }) - } -} - -// prepAgentApproleKV configures a Vault instance for approle authentication, -// such that the resulting token will have global permissions across /kv -// and /secret mounts. Returns the auto_auth config stanza to setup an Agent -// to connect using approle. 
-func prepAgentApproleKV(t *testing.T, client *api.Client) (string, func()) { - t.Helper() - - policyAutoAuthAppRole := ` -path "/kv/*" { - capabilities = ["create", "read", "update", "delete", "list"] -} -path "/secret/*" { - capabilities = ["create", "read", "update", "delete", "list"] -} -` - // Add an kv-admin policy - if err := client.Sys().PutPolicy("test-autoauth", policyAutoAuthAppRole); err != nil { - t.Fatal(err) - } - - // Enable approle - err := client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ - Type: "approle", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write("auth/approle/role/test1", map[string]interface{}{ - "bind_secret_id": "true", - "token_ttl": "1h", - "token_max_ttl": "2h", - "policies": []string{"test-autoauth"}, - }) - if err != nil { - t.Fatal(err) - } - - resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil) - if err != nil { - t.Fatal(err) - } - secretID := resp.Data["secret_id"].(string) - secretIDFile := makeTempFile(t, "secret_id.txt", secretID+"\n") - - resp, err = client.Logical().Read("auth/approle/role/test1/role-id") - if err != nil { - t.Fatal(err) - } - roleID := resp.Data["role_id"].(string) - roleIDFile := makeTempFile(t, "role_id.txt", roleID+"\n") - - config := fmt.Sprintf(` -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false - } - } -} -`, roleIDFile, secretIDFile) - - cleanup := func() { - _ = os.Remove(roleIDFile) - _ = os.Remove(secretIDFile) - } - return config, cleanup -} - -// TestAgent_AutoAuth_UserAgent tests that the User-Agent sent -// to Vault by Vault Agent is correct when performing Auto-Auth. -// Uses the custom handler userAgentHandler (defined above) so -// that Vault validates the User-Agent on requests sent by Agent. 
-func TestAgent_AutoAuth_UserAgent(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - var h userAgentHandler - cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - }, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.AgentAutoAuthString() - h.requestMethodToCheck = "PUT" - h.pathToCheck = "auth/approle/login" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Enable the approle auth method - req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") - req.BodyBytes = []byte(`{ - "type": "approle" - }`) - request(t, serverClient, req, 204) - - // Create a named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") - req.BodyBytes = []byte(`{ - "secret_id_num_uses": "10", - "secret_id_ttl": "1m", - "token_max_ttl": "1m", - "token_num_uses": "10", - "token_ttl": "1m", - "policies": "default" - }`) - request(t, serverClient, req, 204) - - // Fetch the RoleID of the named role - req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") - body := request(t, serverClient, req, 200) - data := body["data"].(map[string]interface{}) - roleID := data["role_id"].(string) - - // Get a SecretID issued against the named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") - body = request(t, serverClient, req, 200) - data = body["data"].(map[string]interface{}) - secretID := data["secret_id"].(string) - - // Write the RoleID and SecretID to temp files - roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") - secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - defer os.Remove(roleIDPath) - defer os.Remove(secretIDPath) - - sinkf, err 
:= os.CreateTemp("", "sink.test.") - if err != nil { - t.Fatal(err) - } - sink := sinkf.Name() - sinkf.Close() - os.Remove(sink) - - autoAuthConfig := fmt.Sprintf(` -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - } - } - - sink "file" { - config = { - path = "%s" - } - } -}`, roleIDPath, secretIDPath, sink) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -api_proxy { - use_auto_auth_token = true -} -%s -%s -`, serverClient.Address(), listenConfig, autoAuthConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - // Start the agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - // Validate that the auto-auth token has been correctly attained - // and works for LookupSelf - conf := api.DefaultConfig() - conf.Address = "http://" + listenAddr - agentClient, err := api.NewClient(conf) - if err != nil { - t.Fatalf("err: %s", err) - } - - agentClient.SetToken("") - err = agentClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - // Wait for the token to be sent to syncs and be available to be used - time.Sleep(5 * time.Second) - - req = agentClient.NewRequest("GET", "/v1/auth/token/lookup-self") - body = request(t, agentClient, req, 200) - - close(cmd.ShutdownCh) - wg.Wait() -} - -// 
TestAgent_APIProxyWithoutCache_UserAgent tests that the User-Agent sent -// to Vault by Vault Agent is correct using the API proxy without -// the cache configured. Uses the custom handler -// userAgentHandler struct defined in this test package, so that Vault validates the -// User-Agent on requests sent by Agent. -func TestAgent_APIProxyWithoutCache_UserAgent(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - userAgentForProxiedClient := "proxied-client" - var h userAgentHandler - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) - h.pathToCheck = "/v1/auth/token/lookup-self" - h.requestMethodToCheck = "GET" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -%s -`, serverClient.Address(), listenConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - agentClient, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - 
agentClient.AddHeader("User-Agent", userAgentForProxiedClient) - agentClient.SetToken(serverClient.Token()) - agentClient.SetMaxRetries(0) - err = agentClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - _, err = agentClient.Auth().Token().LookupSelf() - if err != nil { - t.Fatal(err) - } - - close(cmd.ShutdownCh) - wg.Wait() -} - -// TestAgent_APIProxyWithCache_UserAgent tests that the User-Agent sent -// to Vault by Vault Agent is correct using the API proxy with -// the cache configured. Uses the custom handler -// userAgentHandler struct defined in this test package, so that Vault validates the -// User-Agent on requests sent by Agent. -func TestAgent_APIProxyWithCache_UserAgent(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - userAgentForProxiedClient := "proxied-client" - var h userAgentHandler - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) - h.pathToCheck = "/v1/auth/token/lookup-self" - h.requestMethodToCheck = "GET" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - cacheConfig := ` -cache { -}` - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -%s -%s -`, serverClient.Address(), listenConfig, cacheConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the 
agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - agentClient, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - agentClient.AddHeader("User-Agent", userAgentForProxiedClient) - agentClient.SetToken(serverClient.Token()) - agentClient.SetMaxRetries(0) - err = agentClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - _, err = agentClient.Auth().Token().LookupSelf() - if err != nil { - t.Fatal(err) - } - - close(cmd.ShutdownCh) - wg.Wait() -} - -func TestAgent_Cache_DynamicSecret(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - cacheConfig := ` -cache { -} -` - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -%s -%s -`, serverClient.Address(), cacheConfig, listenConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) + } + } - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() + err = verify() + close(cmd.ShutdownCh) + wg.Wait() - select { - case <-cmd.startedCh: - case 
<-time.After(5 * time.Second): - t.Errorf("timeout") + switch { + case (code != 0 || err != nil) && tc.expectError: + case code == 0 && err == nil && !tc.expectError: + default: + t.Fatalf("%s expectError=%v error=%v code=%d", tcname, tc.expectError, err, code) + } + }) } +} - agentClient, err := api.NewClient(api.DefaultConfig()) - if err != nil { +// prepAgentApproleKV configures a Vault instance for approle authentication, +// such that the resulting token will have global permissions across /kv +// and /secret mounts. Returns the auto_auth config stanza to setup an Agent +// to connect using approle. +func prepAgentApproleKV(t *testing.T, client *api.Client) (string, func()) { + t.Helper() + + policyAutoAuthAppRole := ` +path "/kv/*" { + capabilities = ["create", "read", "update", "delete", "list"] +} +path "/secret/*" { + capabilities = ["create", "read", "update", "delete", "list"] +} +` + // Add an kv-admin policy + if err := client.Sys().PutPolicy("test-autoauth", policyAutoAuthAppRole); err != nil { t.Fatal(err) } - agentClient.SetToken(serverClient.Token()) - agentClient.SetMaxRetries(0) - err = agentClient.SetAddress("http://" + listenAddr) + + // Enable approle + err := client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ + Type: "approle", + }) if err != nil { t.Fatal(err) } - renewable := true - tokenCreateRequest := &api.TokenCreateRequest{ - Policies: []string{"default"}, - TTL: "30m", - Renewable: &renewable, + _, err = client.Logical().Write("auth/approle/role/test1", map[string]interface{}{ + "bind_secret_id": "true", + "token_ttl": "1h", + "token_max_ttl": "2h", + "policies": []string{"test-autoauth"}, + }) + if err != nil { + t.Fatal(err) } - // This was the simplest test I could find to trigger the caching behaviour, - // i.e. 
the most concise I could make the test that I can tell - // creating an orphan token returns Auth, is renewable, and isn't a token - // that's managed elsewhere (since it's an orphan) - secret, err := agentClient.Auth().Token().CreateOrphan(tokenCreateRequest) + resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil) if err != nil { t.Fatal(err) } - if secret == nil || secret.Auth == nil { - t.Fatalf("secret not as expected: %v", secret) - } - - token := secret.Auth.ClientToken + secretID := resp.Data["secret_id"].(string) + secretIDFile := makeTempFile(t, "secret_id.txt", secretID+"\n") - secret, err = agentClient.Auth().Token().CreateOrphan(tokenCreateRequest) + resp, err = client.Logical().Read("auth/approle/role/test1/role-id") if err != nil { t.Fatal(err) } - if secret == nil || secret.Auth == nil { - t.Fatalf("secret not as expected: %v", secret) - } + roleID := resp.Data["role_id"].(string) + roleIDFile := makeTempFile(t, "role_id.txt", roleID+"\n") - token2 := secret.Auth.ClientToken + config := fmt.Sprintf(` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} +`, roleIDFile, secretIDFile) - if token != token2 { - t.Fatalf("token create response not cached when it should have been, as tokens differ") + cleanup := func() { + _ = os.Remove(roleIDFile) + _ = os.Remove(secretIDFile) } - - close(cmd.ShutdownCh) - wg.Wait() + return config, cleanup } -func TestAgent_ApiProxy_Retry(t *testing.T) { +func TestAgent_Cache_Retry(t *testing.T) { //---------------------------------------------------- // Start the server and agent //---------------------------------------------------- @@ -2178,11 +1573,11 @@ func TestAgent_ApiProxy_Retry(t *testing.T) { }, &vault.TestClusterOptions{ NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc(func(properties *vault.HandlerProperties) http.Handler { + HandlerFunc: 
func(properties *vault.HandlerProperties) http.Handler { h.props = properties h.t = t return &h - }), + }, }) cluster.Start() defer cluster.Cleanup() @@ -2262,6 +1657,7 @@ vault { %s %s `, serverClient.Address(), retryConf, cacheConfig, listenConfig) + configPath := makeTempFile(t, "config.hcl", config) defer os.Remove(configPath) @@ -2368,7 +1764,7 @@ func TestAgent_TemplateConfig_ExitOnRetryFailure(t *testing.T) { // make a temp directory to hold renders. Each test will create a temp dir // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + tmpDirRoot, err := ioutil.TempDir("", "agent-test-renders") if err != nil { t.Fatal(err) } @@ -2472,7 +1868,7 @@ func TestAgent_TemplateConfig_ExitOnRetryFailure(t *testing.T) { for tcName, tc := range testCases { t.Run(tcName, func(t *testing.T) { // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, tcName) + tmpDir, err := ioutil.TempDir(tmpDirRoot, tcName) if err != nil { t.Fatal(err) } @@ -2581,7 +1977,7 @@ vault { fileName := filepath.Join(tmpDir, "render_0.json") var c []byte - c, err = os.ReadFile(fileName) + c, err = ioutil.ReadFile(fileName) if err != nil { continue } @@ -2704,7 +2100,24 @@ func TestAgent_Quit(t *testing.T) { //---------------------------------------------------- // Start the server and agent //---------------------------------------------------- - cluster := minimal.NewTestSoloCluster(t, nil) + logger := logging.NewVaultLogger(hclog.Error) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) serverClient := cluster.Cores[0].Client // Unset the environment variable so that agent picks up the right test @@ -2743,7 
+2156,7 @@ cache {} defer os.Remove(configPath) // Start the agent - _, cmd := testAgentCommand(t, nil) + _, cmd := testAgentCommand(t, logger) cmd.startedCh = make(chan struct{}) wg := &sync.WaitGroup{} @@ -2798,305 +2211,6 @@ cache {} wg.Wait() } -func TestAgent_LogFile_CliOverridesConfig(t *testing.T) { - // Create basic config - configFile := populateTempFile(t, "agent-config.hcl", BasicHclConfig) - cfg, err := agentConfig.LoadConfigFile(configFile.Name()) - if err != nil { - t.Fatal("Cannot load config to test update/merge", err) - } - - // Sanity check that the config value is the current value - assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile) - - // Initialize the command and parse any flags - cmd := &AgentCommand{BaseCommand: &BaseCommand{}} - f := cmd.Flags() - // Simulate the flag being specified - err = f.Parse([]string{"-log-file=/foo/bar/test.log"}) - if err != nil { - t.Fatal(err) - } - - // Update the config based on the inputs. - cmd.applyConfigOverrides(f, cfg) - - assert.NotEqual(t, "TMPDIR/juan.log", cfg.LogFile) - assert.NotEqual(t, "/squiggle/logs.txt", cfg.LogFile) - assert.Equal(t, "/foo/bar/test.log", cfg.LogFile) -} - -func TestAgent_LogFile_Config(t *testing.T) { - configFile := populateTempFile(t, "agent-config.hcl", BasicHclConfig) - - cfg, err := agentConfig.LoadConfigFile(configFile.Name()) - if err != nil { - t.Fatal("Cannot load config to test update/merge", err) - } - - // Sanity check that the config value is the current value - assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "sanity check on log config failed") - assert.Equal(t, 2, cfg.LogRotateMaxFiles) - assert.Equal(t, 1048576, cfg.LogRotateBytes) - - // Parse the cli flags (but we pass in an empty slice) - cmd := &AgentCommand{BaseCommand: &BaseCommand{}} - f := cmd.Flags() - err = f.Parse([]string{}) - if err != nil { - t.Fatal(err) - } - - // Should change nothing... 
- cmd.applyConfigOverrides(f, cfg) - - assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "actual config check") - assert.Equal(t, 2, cfg.LogRotateMaxFiles) - assert.Equal(t, 1048576, cfg.LogRotateBytes) -} - -func TestAgent_Config_NewLogger_Default(t *testing.T) { - cmd := &AgentCommand{BaseCommand: &BaseCommand{}} - cmd.config = agentConfig.NewConfig() - logger, err := cmd.newLogger() - - assert.NoError(t, err) - assert.NotNil(t, logger) - assert.Equal(t, hclog.Info.String(), logger.GetLevel().String()) -} - -func TestAgent_Config_ReloadLogLevel(t *testing.T) { - cmd := &AgentCommand{BaseCommand: &BaseCommand{}} - var err error - tempDir := t.TempDir() - - // Load an initial config - hcl := strings.ReplaceAll(BasicHclConfig, "TMPDIR", tempDir) - configFile := populateTempFile(t, "agent-config.hcl", hcl) - cmd.config, err = agentConfig.LoadConfigFile(configFile.Name()) - if err != nil { - t.Fatal("Cannot load config to test update/merge", err) - } - - // Tweak the loaded config to make sure we can put log files into a temp dir - // and systemd log attempts work fine, this would usually happen during Run. 
- cmd.logWriter = os.Stdout - cmd.logger, err = cmd.newLogger() - if err != nil { - t.Fatal("logger required for systemd log messages", err) - } - - // Sanity check - assert.Equal(t, "warn", cmd.config.LogLevel) - - // Load a new config - hcl = strings.ReplaceAll(BasicHclConfig2, "TMPDIR", tempDir) - configFile = populateTempFile(t, "agent-config.hcl", hcl) - err = cmd.reloadConfig([]string{configFile.Name()}) - assert.NoError(t, err) - assert.Equal(t, "debug", cmd.config.LogLevel) -} - -func TestAgent_Config_ReloadTls(t *testing.T) { - var wg sync.WaitGroup - wd, err := os.Getwd() - if err != nil { - t.Fatal("unable to get current working directory") - } - workingDir := filepath.Join(wd, "/agent/test-fixtures/reload") - fooCert := "reload_foo.pem" - fooKey := "reload_foo.key" - - barCert := "reload_bar.pem" - barKey := "reload_bar.key" - - reloadCert := "reload_cert.pem" - reloadKey := "reload_key.pem" - caPem := "reload_ca.pem" - - tempDir := t.TempDir() - - // Set up initial 'foo' certs - inBytes, err := os.ReadFile(filepath.Join(workingDir, fooCert)) - if err != nil { - t.Fatal("unable to read cert required for test", fooCert, err) - } - err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) - if err != nil { - t.Fatal("unable to write temp cert required for test", reloadCert, err) - } - - inBytes, err = os.ReadFile(filepath.Join(workingDir, fooKey)) - if err != nil { - t.Fatal("unable to read cert key required for test", fooKey, err) - } - err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) - if err != nil { - t.Fatal("unable to write temp cert key required for test", reloadKey, err) - } - - inBytes, err = os.ReadFile(filepath.Join(workingDir, caPem)) - if err != nil { - t.Fatal("unable to read CA pem required for test", caPem, err) - } - certPool := x509.NewCertPool() - ok := certPool.AppendCertsFromPEM(inBytes) - if !ok { - t.Fatal("not ok when appending CA cert") - } - - replacedHcl := strings.ReplaceAll(BasicHclConfig, 
"TMPDIR", tempDir) - configFile := populateTempFile(t, "agent-config.hcl", replacedHcl) - - // Set up Agent/cmd - logger := logging.NewVaultLogger(hclog.Trace) - ui, cmd := testAgentCommand(t, logger) - - wg.Add(1) - args := []string{"-config", configFile.Name()} - go func() { - if code := cmd.Run(args); code != 0 { - output := ui.ErrorWriter.String() + ui.OutputWriter.String() - t.Errorf("got a non-zero exit status: %s", output) - } - wg.Done() - }() - - testCertificateName := func(cn string) error { - conn, err := tls.Dial("tcp", "127.0.0.1:8100", &tls.Config{ - RootCAs: certPool, - }) - if err != nil { - return err - } - defer conn.Close() - if err = conn.Handshake(); err != nil { - return err - } - servName := conn.ConnectionState().PeerCertificates[0].Subject.CommonName - if servName != cn { - return fmt.Errorf("expected %s, got %s", cn, servName) - } - return nil - } - - // Start - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Fatalf("timeout") - } - - if err := testCertificateName("foo.example.com"); err != nil { - t.Fatalf("certificate name didn't check out: %s", err) - } - - // Swap out certs - inBytes, err = os.ReadFile(filepath.Join(workingDir, barCert)) - if err != nil { - t.Fatal("unable to read cert required for test", barCert, err) - } - err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) - if err != nil { - t.Fatal("unable to write temp cert required for test", reloadCert, err) - } - - inBytes, err = os.ReadFile(filepath.Join(workingDir, barKey)) - if err != nil { - t.Fatal("unable to read cert key required for test", barKey, err) - } - err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) - if err != nil { - t.Fatal("unable to write temp cert key required for test", reloadKey, err) - } - - // Reload - cmd.SighupCh <- struct{}{} - select { - case <-cmd.reloadedCh: - case <-time.After(5 * time.Second): - t.Fatalf("timeout") - } - - if err := testCertificateName("bar.example.com"); err != 
nil { - t.Fatalf("certificate name didn't check out: %s", err) - } - - // Shut down - cmd.ShutdownCh <- struct{}{} - - wg.Wait() -} - -// TestAgent_NonTLSListener_SIGHUP tests giving a SIGHUP signal to a listener -// without a TLS configuration. Prior to fixing GitHub issue #19480, this -// would cause a panic. -func TestAgent_NonTLSListener_SIGHUP(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -%s -`, serverClient.Address(), listenConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - ui, cmd := testAgentCommand(t, logger) - - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - if code := cmd.Run([]string{"-config", configPath}); code != 0 { - output := ui.ErrorWriter.String() + ui.OutputWriter.String() - t.Errorf("got a non-zero exit status: %s", output) - } - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - // Reload - cmd.SighupCh <- struct{}{} - select { - case <-cmd.reloadedCh: - case <-time.After(5 * time.Second): - t.Fatalf("timeout") - } - - close(cmd.ShutdownCh) - wg.Wait() -} - // Get a randomly assigned port and then free it again before returning it. // There is still a race when trying to use it, but should work better // than a static port. 
diff --git a/command/agentproxyshared/auth/auth.go b/command/agentproxyshared/auth/auth.go deleted file mode 100644 index fdcf12f947e22..0000000000000 --- a/command/agentproxyshared/auth/auth.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package auth - -import ( - "context" - "encoding/json" - "errors" - "math/rand" - "net/http" - "time" - - "github.com/armon/go-metrics" - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/jsonutil" -) - -const ( - defaultMinBackoff = 1 * time.Second - defaultMaxBackoff = 5 * time.Minute -) - -// AuthMethod is the interface that auto-auth methods implement for the agent/proxy -// to use. -type AuthMethod interface { - // Authenticate returns a mount path, header, request body, and error. - // The header may be nil if no special header is needed. - Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) - NewCreds() chan struct{} - CredSuccess() - Shutdown() -} - -// AuthMethodWithClient is an extended interface that can return an API client -// for use during the authentication call. 
-type AuthMethodWithClient interface { - AuthMethod - AuthClient(client *api.Client) (*api.Client, error) -} - -type AuthConfig struct { - Logger hclog.Logger - MountPath string - WrapTTL time.Duration - Config map[string]interface{} -} - -// AuthHandler is responsible for keeping a token alive and renewed and passing -// new tokens to the sink server -type AuthHandler struct { - OutputCh chan string - TemplateTokenCh chan string - ExecTokenCh chan string - token string - userAgent string - metricsSignifier string - logger hclog.Logger - client *api.Client - random *rand.Rand - wrapTTL time.Duration - maxBackoff time.Duration - minBackoff time.Duration - enableReauthOnNewCredentials bool - enableTemplateTokenCh bool - enableExecTokenCh bool - exitOnError bool -} - -type AuthHandlerConfig struct { - Logger hclog.Logger - Client *api.Client - WrapTTL time.Duration - MaxBackoff time.Duration - MinBackoff time.Duration - Token string - // UserAgent is the HTTP UserAgent header auto-auth will use when - // communicating with Vault. 
- UserAgent string - // MetricsSignifier is the first argument we will give to - // metrics.IncrCounter, signifying what the name of the application is - MetricsSignifier string - EnableReauthOnNewCredentials bool - EnableTemplateTokenCh bool - EnableExecTokenCh bool - ExitOnError bool -} - -func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler { - ah := &AuthHandler{ - // This is buffered so that if we try to output after the sink server - // has been shut down, during agent/proxy shutdown, we won't block - OutputCh: make(chan string, 1), - TemplateTokenCh: make(chan string, 1), - ExecTokenCh: make(chan string, 1), - token: conf.Token, - logger: conf.Logger, - client: conf.Client, - random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - wrapTTL: conf.WrapTTL, - minBackoff: conf.MinBackoff, - maxBackoff: conf.MaxBackoff, - enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials, - enableTemplateTokenCh: conf.EnableTemplateTokenCh, - enableExecTokenCh: conf.EnableExecTokenCh, - exitOnError: conf.ExitOnError, - userAgent: conf.UserAgent, - metricsSignifier: conf.MetricsSignifier, - } - - return ah -} - -func backoff(ctx context.Context, backoff *autoAuthBackoff) bool { - if backoff.exitOnErr { - return false - } - - select { - case <-time.After(backoff.current): - case <-ctx.Done(): - } - - // Increase exponential backoff for the next time if we don't - // successfully auth/renew/etc. 
- backoff.next() - return true -} - -func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error { - if am == nil { - return errors.New("auth handler: nil auth method") - } - - if ah.minBackoff <= 0 { - ah.minBackoff = defaultMinBackoff - } - - backoffCfg := newAutoAuthBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError) - - if backoffCfg.min >= backoffCfg.max { - return errors.New("auth handler: min_backoff cannot be greater than max_backoff") - } - - ah.logger.Info("starting auth handler") - defer func() { - am.Shutdown() - close(ah.OutputCh) - close(ah.TemplateTokenCh) - close(ah.ExecTokenCh) - ah.logger.Info("auth handler stopped") - }() - - credCh := am.NewCreds() - if !ah.enableReauthOnNewCredentials { - realCredCh := credCh - credCh = nil - if realCredCh != nil { - go func() { - for { - select { - case <-ctx.Done(): - return - case <-realCredCh: - } - } - }() - } - } - if credCh == nil { - credCh = make(chan struct{}) - } - - if ah.client != nil { - headers := ah.client.Headers() - if headers == nil { - headers = make(http.Header) - } - headers.Set("User-Agent", ah.userAgent) - ah.client.SetHeaders(headers) - } - - var watcher *api.LifetimeWatcher - first := true - - for { - select { - case <-ctx.Done(): - return nil - - default: - } - - var clientToUse *api.Client - var err error - var path string - var data map[string]interface{} - var header http.Header - var isTokenFileMethod bool - - switch am.(type) { - case AuthMethodWithClient: - clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client) - if err != nil { - ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoff) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - - return err - } - default: - clientToUse = ah.client - } - - // Disable retry on the client to ensure our backoffOrQuit function is - // the only source of retry/backoff. 
- clientToUse.SetMaxRetries(0) - - var secret *api.Secret = new(api.Secret) - if first && ah.token != "" { - ah.logger.Debug("using preloaded token") - - first = false - ah.logger.Debug("lookup-self with preloaded token") - clientToUse.SetToken(ah.token) - - secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx) - if err != nil { - ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - duration, _ := secret.Data["ttl"].(json.Number).Int64() - secret.Auth = &api.SecretAuth{ - ClientToken: secret.Data["id"].(string), - LeaseDuration: int(duration), - Renewable: secret.Data["renewable"].(bool), - } - } else { - ah.logger.Info("authenticating") - - path, header, data, err = am.Authenticate(ctx, ah.client) - if err != nil { - ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - } - - if ah.wrapTTL > 0 { - wrapClient, err := clientToUse.Clone() - if err != nil { - ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - wrapClient.SetWrappingLookupFunc(func(string, string) string { - return ah.wrapTTL.String() - }) - clientToUse = wrapClient - } - for key, values := range header { - for _, value := range values { - clientToUse.AddHeader(key, value) - } - } - - // This should only happen if there's no preloaded token (regular auto-auth login) - // or if a preloaded token has expired and is now switching to auto-auth. 
- if secret.Auth == nil { - isTokenFileMethod = path == "auth/token/lookup-self" - if isTokenFileMethod { - token, _ := data["token"].(string) - lookupSelfClient, err := clientToUse.Clone() - if err != nil { - ah.logger.Error("failed to clone client to perform token lookup") - return err - } - lookupSelfClient.SetToken(token) - secret, err = lookupSelfClient.Auth().Token().LookupSelf() - } else { - secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data) - } - - // Check errors/sanity - if err != nil { - ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - } - - var leaseDuration int - - switch { - case ah.wrapTTL > 0: - if secret.WrapInfo == nil { - ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - if secret.WrapInfo.Token == "" { - ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo) - if err != nil { - ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing") - ah.OutputCh <- string(wrappedResp) - if ah.enableTemplateTokenCh { - ah.TemplateTokenCh <- string(wrappedResp) - } - if ah.enableExecTokenCh { - ah.ExecTokenCh <- string(wrappedResp) - } - - am.CredSuccess() - backoffCfg.reset() - - select { - case <-ctx.Done(): - 
ah.logger.Info("shutdown triggered") - continue - - case <-credCh: - ah.logger.Info("auth method found new credentials, re-authenticating") - continue - } - - default: - // We handle the token_file method specially, as it's the only - // auth method that isn't actually authenticating, i.e. the secret - // returned does not have an Auth struct attached - isTokenFileMethod := path == "auth/token/lookup-self" - if isTokenFileMethod { - // We still check the response of the request to ensure the token is valid - // i.e. if the token is invalid, we will fail in the authentication step - if secret == nil || secret.Data == nil { - ah.logger.Error("token file validation failed, token may be invalid", "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - token, ok := secret.Data["id"].(string) - if !ok || token == "" { - ah.logger.Error("token file validation returned empty client token", "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - duration, _ := secret.Data["ttl"].(json.Number).Int64() - leaseDuration = int(duration) - renewable, _ := secret.Data["renewable"].(bool) - secret.Auth = &api.SecretAuth{ - ClientToken: token, - LeaseDuration: int(duration), - Renewable: renewable, - } - ah.logger.Info("authentication successful, sending token to sinks") - ah.OutputCh <- token - if ah.enableTemplateTokenCh { - ah.TemplateTokenCh <- token - } - if ah.enableExecTokenCh { - ah.ExecTokenCh <- token - } - - tokenType := secret.Data["type"].(string) - if tokenType == "batch" { - ah.logger.Info("note that this token type is batch, and batch tokens cannot be renewed", "ttl", leaseDuration) - } - } else { - if secret == nil || secret.Auth == nil { - ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg) - 
metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - if secret.Auth.ClientToken == "" { - ah.logger.Error("authentication returned empty client token", "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - leaseDuration = secret.LeaseDuration - ah.logger.Info("authentication successful, sending token to sinks") - ah.OutputCh <- secret.Auth.ClientToken - if ah.enableTemplateTokenCh { - ah.TemplateTokenCh <- secret.Auth.ClientToken - } - if ah.enableExecTokenCh { - ah.ExecTokenCh <- secret.Auth.ClientToken - } - } - - am.CredSuccess() - backoffCfg.reset() - } - - if watcher != nil { - watcher.Stop() - } - - watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{ - Secret: secret, - }) - if err != nil { - ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "success"}, 1) - // We don't want to trigger the renewal process for tokens with - // unlimited TTL, such as the root token. 
- if leaseDuration == 0 && isTokenFileMethod { - ah.logger.Info("not starting token renewal process, as token has unlimited TTL") - } else { - ah.logger.Info("starting renewal process") - go watcher.Renew() - } - - LifetimeWatcherLoop: - for { - select { - case <-ctx.Done(): - ah.logger.Info("shutdown triggered, stopping lifetime watcher") - watcher.Stop() - break LifetimeWatcherLoop - - case err := <-watcher.DoneCh(): - ah.logger.Info("lifetime watcher done channel triggered") - if err != nil { - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) - ah.logger.Error("error renewing token", "error", err) - } - break LifetimeWatcherLoop - - case <-watcher.RenewCh(): - metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "success"}, 1) - ah.logger.Info("renewed auth token") - - case <-credCh: - ah.logger.Info("auth method found new credentials, re-authenticating") - break LifetimeWatcherLoop - } - } - } -} - -// autoAuthBackoff tracks exponential backoff state. -type autoAuthBackoff struct { - min time.Duration - max time.Duration - current time.Duration - exitOnErr bool -} - -func newAutoAuthBackoff(min, max time.Duration, exitErr bool) *autoAuthBackoff { - if max <= 0 { - max = defaultMaxBackoff - } - - if min <= 0 { - min = defaultMinBackoff - } - - return &autoAuthBackoff{ - current: min, - max: max, - min: min, - exitOnErr: exitErr, - } -} - -// next determines the next backoff duration that is roughly twice -// the current value, capped to a max value, with a measure of randomness. 
-func (b *autoAuthBackoff) next() { - maxBackoff := 2 * b.current - - if maxBackoff > b.max { - maxBackoff = b.max - } - - // Trim a random amount (0-25%) off the doubled duration - trim := rand.Int63n(int64(maxBackoff) / 4) - b.current = maxBackoff - time.Duration(trim) -} - -func (b *autoAuthBackoff) reset() { - b.current = b.min -} - -func (b autoAuthBackoff) String() string { - return b.current.Truncate(10 * time.Millisecond).String() -} diff --git a/command/agentproxyshared/auth/cert/cert.go b/command/agentproxyshared/auth/cert/cert.go deleted file mode 100644 index 5270dcb1b4a27..0000000000000 --- a/command/agentproxyshared/auth/cert/cert.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cert - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - "github.com/hashicorp/vault/sdk/helper/consts" -) - -type certMethod struct { - logger hclog.Logger - mountPath string - name string - - caCert string - clientCert string - clientKey string - reload bool - - // Client is the cached client to use if cert info was provided. 
- client *api.Client -} - -var _ auth.AuthMethodWithClient = &certMethod{} - -func NewCertAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { - if conf == nil { - return nil, errors.New("empty config") - } - - // Not concerned if the conf.Config is empty as the 'name' - // parameter is optional when using TLS Auth - - c := &certMethod{ - logger: conf.Logger, - mountPath: conf.MountPath, - } - - if conf.Config != nil { - nameRaw, ok := conf.Config["name"] - if !ok { - nameRaw = "" - } - c.name, ok = nameRaw.(string) - if !ok { - return nil, errors.New("could not convert 'name' config value to string") - } - - caCertRaw, ok := conf.Config["ca_cert"] - if ok { - c.caCert, ok = caCertRaw.(string) - if !ok { - return nil, errors.New("could not convert 'ca_cert' config value to string") - } - } - - clientCertRaw, ok := conf.Config["client_cert"] - if ok { - c.clientCert, ok = clientCertRaw.(string) - if !ok { - return nil, errors.New("could not convert 'cert_file' config value to string") - } - } - - clientKeyRaw, ok := conf.Config["client_key"] - if ok { - c.clientKey, ok = clientKeyRaw.(string) - if !ok { - return nil, errors.New("could not convert 'cert_key' config value to string") - } - } - - reload, ok := conf.Config["reload"] - if ok { - c.reload, ok = reload.(bool) - if !ok { - return nil, errors.New("could not convert 'reload' config value to bool") - } - } - } - - return c, nil -} - -func (c *certMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { - c.logger.Trace("beginning authentication") - - authMap := map[string]interface{}{} - - if c.name != "" { - authMap["name"] = c.name - } - - return fmt.Sprintf("%s/login", c.mountPath), nil, authMap, nil -} - -func (c *certMethod) NewCreds() chan struct{} { - return nil -} - -func (c *certMethod) CredSuccess() {} - -func (c *certMethod) Shutdown() {} - -// AuthClient uses the existing client's address and returns a new client with -// the 
auto-auth method's certificate information if that's provided in its -// config map. -func (c *certMethod) AuthClient(client *api.Client) (*api.Client, error) { - c.logger.Trace("deriving auth client to use") - - clientToAuth := client - - if c.caCert != "" || (c.clientKey != "" && c.clientCert != "") { - // Return cached client if present - if c.client != nil && !c.reload { - return c.client, nil - } - - config := api.DefaultConfig() - if config.Error != nil { - return nil, config.Error - } - config.Address = client.Address() - - t := &api.TLSConfig{ - CACert: c.caCert, - ClientCert: c.clientCert, - ClientKey: c.clientKey, - } - - // Setup TLS config - if err := config.ConfigureTLS(t); err != nil { - return nil, err - } - - var err error - clientToAuth, err = api.NewClient(config) - if err != nil { - return nil, err - } - if ns := client.Headers().Get(consts.NamespaceHeaderName); ns != "" { - clientToAuth.SetNamespace(ns) - } - - // Cache the client for future use - c.client = clientToAuth - } - - return clientToAuth, nil -} diff --git a/command/agentproxyshared/auth/cert/cert_test.go b/command/agentproxyshared/auth/cert/cert_test.go deleted file mode 100644 index 43a5f83f4c29d..0000000000000 --- a/command/agentproxyshared/auth/cert/cert_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cert - -import ( - "context" - "os" - "path" - "reflect" - "testing" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" -) - -func TestCertAuthMethod_Authenticate(t *testing.T) { - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "foo", - }, - } - - method, err := NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := api.NewClient(nil) - if err != nil { - t.Fatal(err) - } - - loginPath, _, authMap, err := method.Authenticate(context.Background(), client) - if err != nil { - t.Fatal(err) - } - - expectedLoginPath := path.Join(config.MountPath, "/login") - if loginPath != expectedLoginPath { - t.Fatalf("mismatch on login path: got: %s, expected: %s", loginPath, expectedLoginPath) - } - - expectedAuthMap := map[string]interface{}{ - "name": config.Config["name"], - } - if !reflect.DeepEqual(authMap, expectedAuthMap) { - t.Fatalf("mismatch on login path:\ngot:\n\t%v\nexpected:\n\t%v", authMap, expectedAuthMap) - } -} - -func TestCertAuthMethod_AuthClient_withoutCerts(t *testing.T) { - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "without-certs", - }, - } - - method, err := NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - - clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if client != clientToUse { - t.Fatal("error: expected AuthClient to return back original client") - } -} - -func TestCertAuthMethod_AuthClient_withCerts(t *testing.T) { - clientCert, err := os.Open("./test-fixtures/keys/cert.pem") - if err != nil { - t.Fatal(err) - } - defer clientCert.Close() - - clientKey, err 
:= os.Open("./test-fixtures/keys/key.pem") - if err != nil { - t.Fatal(err) - } - defer clientKey.Close() - - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "with-certs", - "client_cert": clientCert.Name(), - "client_key": clientKey.Name(), - }, - } - - method, err := NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := api.NewClient(nil) - if err != nil { - t.Fatal(err) - } - - clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if client == clientToUse { - t.Fatal("expected client from AuthClient to be different from original client") - } - - // Call AuthClient again to get back the cached client - cachedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if cachedClient != clientToUse { - t.Fatal("expected client from AuthClient to return back a cached client") - } -} - -func TestCertAuthMethod_AuthClient_withCertsReload(t *testing.T) { - clientCert, err := os.Open("./test-fixtures/keys/cert.pem") - if err != nil { - t.Fatal(err) - } - - defer clientCert.Close() - - clientKey, err := os.Open("./test-fixtures/keys/key.pem") - if err != nil { - t.Fatal(err) - } - - defer clientKey.Close() - - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "with-certs-reloaded", - "client_cert": clientCert.Name(), - "client_key": clientKey.Name(), - "reload": true, - }, - } - - method, err := NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := api.NewClient(nil) - if err != nil { - t.Fatal(err) - } - - clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if client == clientToUse { - t.Fatal("expected client from AuthClient to be different from original client") - } - - // Call 
AuthClient again to get back a new client with reloaded certificates - reloadedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if reloadedClient == clientToUse { - t.Fatal("expected client from AuthClient to return back a new client") - } -} diff --git a/command/agentproxyshared/auth/jwt/jwt.go b/command/agentproxyshared/auth/jwt/jwt.go deleted file mode 100644 index fa878274344ff..0000000000000 --- a/command/agentproxyshared/auth/jwt/jwt.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package jwt - -import ( - "context" - "errors" - "fmt" - "io/fs" - "net/http" - "os" - "path/filepath" - "sync" - "sync/atomic" - "time" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - "github.com/hashicorp/vault/sdk/helper/parseutil" -) - -type jwtMethod struct { - logger hclog.Logger - path string - mountPath string - role string - removeJWTAfterReading bool - removeJWTFollowsSymlinks bool - credsFound chan struct{} - watchCh chan string - stopCh chan struct{} - doneCh chan struct{} - credSuccessGate chan struct{} - ticker *time.Ticker - once *sync.Once - latestToken *atomic.Value -} - -// NewJWTAuthMethod returns an implementation of Agent's auth.AuthMethod -// interface for JWT auth. 
-func NewJWTAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { - if conf == nil { - return nil, errors.New("empty config") - } - if conf.Config == nil { - return nil, errors.New("empty config data") - } - - j := &jwtMethod{ - logger: conf.Logger, - mountPath: conf.MountPath, - removeJWTAfterReading: true, - credsFound: make(chan struct{}), - watchCh: make(chan string), - stopCh: make(chan struct{}), - doneCh: make(chan struct{}), - credSuccessGate: make(chan struct{}), - once: new(sync.Once), - latestToken: new(atomic.Value), - } - j.latestToken.Store("") - - pathRaw, ok := conf.Config["path"] - if !ok { - return nil, errors.New("missing 'path' value") - } - j.path, ok = pathRaw.(string) - if !ok { - return nil, errors.New("could not convert 'path' config value to string") - } - - roleRaw, ok := conf.Config["role"] - if !ok { - return nil, errors.New("missing 'role' value") - } - j.role, ok = roleRaw.(string) - if !ok { - return nil, errors.New("could not convert 'role' config value to string") - } - - if removeJWTAfterReadingRaw, ok := conf.Config["remove_jwt_after_reading"]; ok { - removeJWTAfterReading, err := parseutil.ParseBool(removeJWTAfterReadingRaw) - if err != nil { - return nil, fmt.Errorf("error parsing 'remove_jwt_after_reading' value: %w", err) - } - j.removeJWTAfterReading = removeJWTAfterReading - } - - if removeJWTFollowsSymlinksRaw, ok := conf.Config["remove_jwt_follows_symlinks"]; ok { - removeJWTFollowsSymlinks, err := parseutil.ParseBool(removeJWTFollowsSymlinksRaw) - if err != nil { - return nil, fmt.Errorf("error parsing 'remove_jwt_follows_symlinks' value: %w", err) - } - j.removeJWTFollowsSymlinks = removeJWTFollowsSymlinks - } - - switch { - case j.path == "": - return nil, errors.New("'path' value is empty") - case j.role == "": - return nil, errors.New("'role' value is empty") - } - - // Default readPeriod - readPeriod := 1 * time.Minute - - if jwtReadPeriodRaw, ok := conf.Config["jwt_read_period"]; ok { - jwtReadPeriod, err := 
parseutil.ParseDurationSecond(jwtReadPeriodRaw) - if err != nil { - return nil, fmt.Errorf("error parsing 'jwt_read_period' value: %w", err) - } - readPeriod = jwtReadPeriod - } else { - // If we don't delete the JWT after reading, use a slower reload period, - // otherwise we would re-read the whole file every 500ms, instead of just - // doing a stat on the file every 500ms. - if j.removeJWTAfterReading { - readPeriod = 500 * time.Millisecond - } - } - - j.ticker = time.NewTicker(readPeriod) - - go j.runWatcher() - - j.logger.Info("jwt auth method created", "path", j.path) - - return j, nil -} - -func (j *jwtMethod) Authenticate(_ context.Context, _ *api.Client) (string, http.Header, map[string]interface{}, error) { - j.logger.Trace("beginning authentication") - - j.ingressToken() - - latestToken := j.latestToken.Load().(string) - if latestToken == "" { - return "", nil, nil, errors.New("latest known jwt is empty, cannot authenticate") - } - - return fmt.Sprintf("%s/login", j.mountPath), nil, map[string]interface{}{ - "role": j.role, - "jwt": latestToken, - }, nil -} - -func (j *jwtMethod) NewCreds() chan struct{} { - return j.credsFound -} - -func (j *jwtMethod) CredSuccess() { - j.once.Do(func() { - close(j.credSuccessGate) - }) -} - -func (j *jwtMethod) Shutdown() { - j.ticker.Stop() - close(j.stopCh) - <-j.doneCh -} - -func (j *jwtMethod) runWatcher() { - defer close(j.doneCh) - - select { - case <-j.stopCh: - return - - case <-j.credSuccessGate: - // We only start the next loop once we're initially successful, - // since at startup Authenticate will be called, and we don't want - // to end up immediately re-authenticating by having found a new - // value - } - - for { - select { - case <-j.stopCh: - return - - case <-j.ticker.C: - latestToken := j.latestToken.Load().(string) - j.ingressToken() - newToken := j.latestToken.Load().(string) - if newToken != latestToken { - j.logger.Debug("new jwt file found") - j.credsFound <- struct{}{} - } - } - } -} - -func (j 
*jwtMethod) ingressToken() { - fi, err := os.Lstat(j.path) - if err != nil { - if os.IsNotExist(err) { - return - } - j.logger.Error("error encountered stat'ing jwt file", "error", err) - return - } - - // Check that the path refers to a file. - // If it's a symlink, it could still be a symlink to a directory, - // but os.ReadFile below will return a descriptive error. - evalSymlinkPath := j.path - switch mode := fi.Mode(); { - case mode.IsRegular(): - // regular file - case mode&fs.ModeSymlink != 0: - // If our file path is a symlink, we should also return early (like above) without error - // if the file that is linked to is not present, otherwise we will error when trying - // to read that file by following the link in the os.ReadFile call. - evalSymlinkPath, err = filepath.EvalSymlinks(j.path) - if err != nil { - j.logger.Error("error encountered evaluating symlinks", "error", err) - return - } - _, err := os.Stat(evalSymlinkPath) - if err != nil { - if os.IsNotExist(err) { - return - } - j.logger.Error("error encountered stat'ing jwt file after evaluating symlinks", "error", err) - return - } - default: - j.logger.Error("jwt file is not a regular file or symlink") - return - } - - token, err := os.ReadFile(j.path) - if err != nil { - j.logger.Error("failed to read jwt file", "error", err) - return - } - - switch len(token) { - case 0: - j.logger.Warn("empty jwt file read") - - default: - j.latestToken.Store(string(token)) - } - - if j.removeJWTAfterReading { - pathToRemove := j.path - if j.removeJWTFollowsSymlinks { - // If removeJWTFollowsSymlinks is set, we follow the symlink and delete the jwt, - // not just the symlink that links to the jwt - pathToRemove = evalSymlinkPath - } - if err := os.Remove(pathToRemove); err != nil { - j.logger.Error("error removing jwt file", "error", err) - } - } -} diff --git a/command/agentproxyshared/auth/jwt/jwt_test.go b/command/agentproxyshared/auth/jwt/jwt_test.go deleted file mode 100644 index 
3e0db40902478..0000000000000 --- a/command/agentproxyshared/auth/jwt/jwt_test.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package jwt - -import ( - "bytes" - "os" - "path" - "strings" - "sync/atomic" - "testing" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agentproxyshared/auth" -) - -func TestIngressToken(t *testing.T) { - const ( - dir = "dir" - file = "file" - empty = "empty" - missing = "missing" - symlinked = "symlinked" - ) - - rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") - if err != nil { - t.Fatalf("failed to create temp dir: %s", err) - } - defer os.RemoveAll(rootDir) - - setupTestDir := func() string { - testDir, err := os.MkdirTemp(rootDir, "") - if err != nil { - t.Fatal(err) - } - err = os.WriteFile(path.Join(testDir, file), []byte("test"), 0o644) - if err != nil { - t.Fatal(err) - } - _, err = os.Create(path.Join(testDir, empty)) - if err != nil { - t.Fatal(err) - } - err = os.Mkdir(path.Join(testDir, dir), 0o755) - if err != nil { - t.Fatal(err) - } - err = os.Symlink(path.Join(testDir, file), path.Join(testDir, symlinked)) - if err != nil { - t.Fatal(err) - } - - return testDir - } - - for _, tc := range []struct { - name string - path string - errString string - }{ - { - "happy path", - file, - "", - }, - { - "path is directory", - dir, - "[ERROR] jwt file is not a regular file or symlink", - }, - { - "path is symlink", - symlinked, - "", - }, - { - "path is missing (implies nothing for ingressToken to do)", - missing, - "", - }, - { - "path is empty file", - empty, - "[WARN] empty jwt file read", - }, - } { - testDir := setupTestDir() - logBuffer := bytes.Buffer{} - jwtAuth := &jwtMethod{ - logger: hclog.New(&hclog.LoggerOptions{ - Output: &logBuffer, - }), - latestToken: new(atomic.Value), - path: path.Join(testDir, tc.path), - } - - jwtAuth.ingressToken() - - if tc.errString != "" { - if !strings.Contains(logBuffer.String(), 
tc.errString) { - t.Fatal("logs did no contain expected error", tc.errString, logBuffer.String()) - } - } else { - if strings.Contains(logBuffer.String(), "[ERROR]") || strings.Contains(logBuffer.String(), "[WARN]") { - t.Fatal("logs contained unexpected error", logBuffer.String()) - } - } - } -} - -func TestDeleteAfterReading(t *testing.T) { - for _, tc := range map[string]struct { - configValue string - shouldDelete bool - }{ - "default": { - "", - true, - }, - "explicit true": { - "true", - true, - }, - "false": { - "false", - false, - }, - } { - rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") - if err != nil { - t.Fatalf("failed to create temp dir: %s", err) - } - defer os.RemoveAll(rootDir) - tokenPath := path.Join(rootDir, "token") - err = os.WriteFile(tokenPath, []byte("test"), 0o644) - if err != nil { - t.Fatal(err) - } - - config := &auth.AuthConfig{ - Config: map[string]interface{}{ - "path": tokenPath, - "role": "unusedrole", - }, - Logger: hclog.Default(), - } - if tc.configValue != "" { - config.Config["remove_jwt_after_reading"] = tc.configValue - } - - jwtAuth, err := NewJWTAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - jwtAuth.(*jwtMethod).ingressToken() - - if _, err := os.Lstat(tokenPath); tc.shouldDelete { - if err == nil || !os.IsNotExist(err) { - t.Fatal(err) - } - } else { - if err != nil { - t.Fatal(err) - } - } - } -} - -func TestDeleteAfterReadingSymlink(t *testing.T) { - for _, tc := range map[string]struct { - configValue string - shouldDelete bool - removeJWTFollowsSymlinks bool - }{ - "default": { - "", - true, - false, - }, - "explicit true": { - "true", - true, - false, - }, - "false": { - "false", - false, - false, - }, - "default + removeJWTFollowsSymlinks": { - "", - true, - true, - }, - "explicit true + removeJWTFollowsSymlinks": { - "true", - true, - true, - }, - "false + removeJWTFollowsSymlinks": { - "false", - false, - true, - }, - } { - rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") - 
if err != nil { - t.Fatalf("failed to create temp dir: %s", err) - } - defer os.RemoveAll(rootDir) - tokenPath := path.Join(rootDir, "token") - err = os.WriteFile(tokenPath, []byte("test"), 0o644) - if err != nil { - t.Fatal(err) - } - - symlink, err := os.CreateTemp("", "auth.jwt.symlink.test.") - if err != nil { - t.Fatal(err) - } - symlinkName := symlink.Name() - symlink.Close() - os.Remove(symlinkName) - os.Symlink(tokenPath, symlinkName) - - config := &auth.AuthConfig{ - Config: map[string]interface{}{ - "path": symlinkName, - "role": "unusedrole", - }, - Logger: hclog.Default(), - } - if tc.configValue != "" { - config.Config["remove_jwt_after_reading"] = tc.configValue - } - config.Config["remove_jwt_follows_symlinks"] = tc.removeJWTFollowsSymlinks - - jwtAuth, err := NewJWTAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - jwtAuth.(*jwtMethod).ingressToken() - - pathToCheck := symlinkName - if tc.removeJWTFollowsSymlinks { - pathToCheck = tokenPath - } - if _, err := os.Lstat(pathToCheck); tc.shouldDelete { - if err == nil || !os.IsNotExist(err) { - t.Fatal(err) - } - } else { - if err != nil { - t.Fatal(err) - } - } - } -} diff --git a/command/agentproxyshared/auth/oci/oci.go b/command/agentproxyshared/auth/oci/oci.go deleted file mode 100644 index ec4d9ebf750b5..0000000000000 --- a/command/agentproxyshared/auth/oci/oci.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package oci - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "os/user" - "path" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - "github.com/oracle/oci-go-sdk/common" - ociAuth "github.com/oracle/oci-go-sdk/common/auth" -) - -const ( - typeAPIKey = "apikey" - typeInstance = "instance" - - /* - - IAM creds can be inferred from instance metadata or the container - identity service, and those creds expire at varying intervals with - new creds becoming available at likewise varying intervals. Let's - default to polling once a minute so all changes can be picked up - rather quickly. This is configurable, however. - - */ - defaultCredCheckFreqSeconds = 60 * time.Second - - defaultConfigFileName = "config" - defaultConfigDirName = ".oci" - configFilePathEnvVarName = "OCI_CONFIG_FILE" - secondaryConfigDirName = ".oraclebmc" -) - -func NewOCIAuthMethod(conf *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) { - if conf == nil { - return nil, errors.New("empty config") - } - if conf.Config == nil { - return nil, errors.New("empty config data") - } - - a := &ociMethod{ - logger: conf.Logger, - vaultAddress: vaultAddress, - mountPath: conf.MountPath, - credsFound: make(chan struct{}), - stopCh: make(chan struct{}), - } - - typeRaw, ok := conf.Config["type"] - if !ok { - return nil, errors.New("missing 'type' value") - } - authType, ok := typeRaw.(string) - if !ok { - return nil, errors.New("could not convert 'type' config value to string") - } - - roleRaw, ok := conf.Config["role"] - if !ok { - return nil, errors.New("missing 'role' value") - } - a.role, ok = roleRaw.(string) - if !ok { - return nil, errors.New("could not convert 'role' config value to string") - } - - // Check for an optional custom frequency at which we should poll for creds. 
- credCheckFreqSec := defaultCredCheckFreqSeconds - if checkFreqRaw, ok := conf.Config["credential_poll_interval"]; ok { - checkFreq, err := parseutil.ParseDurationSecond(checkFreqRaw) - if err != nil { - return nil, fmt.Errorf("could not parse credential_poll_interval: %v", err) - } - credCheckFreqSec = checkFreq - } - - switch { - case a.role == "": - return nil, errors.New("'role' value is empty") - case authType == "": - return nil, errors.New("'type' value is empty") - case authType != typeAPIKey && authType != typeInstance: - return nil, errors.New("'type' value is invalid") - case authType == typeAPIKey: - defaultConfigFile := getDefaultConfigFilePath() - homeFolder := getHomeFolder() - secondaryConfigFile := path.Join(homeFolder, secondaryConfigDirName, defaultConfigFileName) - - environmentProvider := common.ConfigurationProviderEnvironmentVariables("OCI", "") - defaultFileProvider, _ := common.ConfigurationProviderFromFile(defaultConfigFile, "") - secondaryFileProvider, _ := common.ConfigurationProviderFromFile(secondaryConfigFile, "") - - provider, _ := common.ComposingConfigurationProvider([]common.ConfigurationProvider{environmentProvider, defaultFileProvider, secondaryFileProvider}) - a.configurationProvider = provider - case authType == typeInstance: - configurationProvider, err := ociAuth.InstancePrincipalConfigurationProvider() - if err != nil { - return nil, fmt.Errorf("failed to create instance principal configuration provider: %v", err) - } - a.configurationProvider = configurationProvider - } - - // Do an initial population of the creds because we want to err right away if we can't - // even get a first set. 
- creds, err := a.configurationProvider.KeyID() - if err != nil { - return nil, err - } - a.lastCreds = creds - - go a.pollForCreds(credCheckFreqSec) - - return a, nil -} - -type ociMethod struct { - logger hclog.Logger - vaultAddress string - mountPath string - - configurationProvider common.ConfigurationProvider - role string - - // These are used to share the latest creds safely across goroutines. - credLock sync.Mutex - lastCreds string - - // Notifies the outer environment that it should call Authenticate again. - credsFound chan struct{} - - // Detects that the outer environment is closing. - stopCh chan struct{} -} - -func (a *ociMethod) Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) { - a.credLock.Lock() - defer a.credLock.Unlock() - - a.logger.Trace("beginning authentication") - - requestPath := fmt.Sprintf("/v1/%s/login/%s", a.mountPath, a.role) - requestURL := fmt.Sprintf("%s%s", a.vaultAddress, requestPath) - - request, err := http.NewRequest("GET", requestURL, nil) - if err != nil { - return "", nil, nil, fmt.Errorf("error creating authentication request: %w", err) - } - - request.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) - - signer := common.DefaultRequestSigner(a.configurationProvider) - - err = signer.Sign(request) - - if err != nil { - return "", nil, nil, fmt.Errorf("error signing authentication request: %w", err) - } - - parsedVaultAddress, err := url.Parse(a.vaultAddress) - if err != nil { - return "", nil, nil, fmt.Errorf("unable to parse vault address: %w", err) - } - - request.Header.Set("Host", parsedVaultAddress.Host) - request.Header.Set("(request-target)", fmt.Sprintf("%s %s", "get", requestPath)) - - data := map[string]interface{}{ - "request_headers": request.Header, - } - - return fmt.Sprintf("%s/login/%s", a.mountPath, a.role), nil, data, nil -} - -func (a *ociMethod) NewCreds() chan struct{} { - return a.credsFound -} - -func (a *ociMethod) CredSuccess() {} - -func 
(a *ociMethod) Shutdown() { - close(a.credsFound) - close(a.stopCh) -} - -func (a *ociMethod) pollForCreds(frequency time.Duration) { - ticker := time.NewTicker(frequency) - defer ticker.Stop() - for { - select { - case <-a.stopCh: - a.logger.Trace("shutdown triggered, stopping OCI auth handler") - return - case <-ticker.C: - if err := a.checkCreds(); err != nil { - a.logger.Warn("unable to retrieve current creds, retaining last creds", "error", err) - } - } - } -} - -func (a *ociMethod) checkCreds() error { - a.credLock.Lock() - defer a.credLock.Unlock() - - a.logger.Trace("checking for new credentials") - currentCreds, err := a.configurationProvider.KeyID() - if err != nil { - return err - } - // These will always have different pointers regardless of whether their - // values are identical, hence the use of DeepEqual. - if currentCreds == a.lastCreds { - a.logger.Trace("credentials are unchanged") - return nil - } - a.lastCreds = currentCreds - a.logger.Trace("new credentials detected, triggering Authenticate") - a.credsFound <- struct{}{} - return nil -} - -func getHomeFolder() string { - current, e := user.Current() - if e != nil { - // Give up and try to return something sensible - home, err := os.UserHomeDir() - if err != nil { - return "" - } - return home - } - return current.HomeDir -} - -func getDefaultConfigFilePath() string { - homeFolder := getHomeFolder() - defaultConfigFile := path.Join(homeFolder, defaultConfigDirName, defaultConfigFileName) - if _, err := os.Stat(defaultConfigFile); err == nil { - return defaultConfigFile - } - - // Read configuration file path from OCI_CONFIG_FILE env var - fallbackConfigFile, existed := os.LookupEnv(configFilePathEnvVarName) - if !existed { - return defaultConfigFile - } - if _, err := os.Stat(fallbackConfigFile); os.IsNotExist(err) { - return defaultConfigFile - } - return fallbackConfigFile -} diff --git a/command/agentproxyshared/auth/token-file/token_file.go 
b/command/agentproxyshared/auth/token-file/token_file.go deleted file mode 100644 index 4c7eaa22aba4a..0000000000000 --- a/command/agentproxyshared/auth/token-file/token_file.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package token_file - -import ( - "context" - "errors" - "fmt" - "net/http" - "os" - "strings" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/auth" -) - -type tokenFileMethod struct { - logger hclog.Logger - mountPath string - - cachedToken string - tokenFilePath string -} - -func NewTokenFileAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { - if conf == nil { - return nil, errors.New("empty config") - } - if conf.Config == nil { - return nil, errors.New("empty config data") - } - - a := &tokenFileMethod{ - logger: conf.Logger, - mountPath: "auth/token", - } - - tokenFilePathRaw, ok := conf.Config["token_file_path"] - if !ok { - return nil, errors.New("missing 'token_file_path' value") - } - a.tokenFilePath, ok = tokenFilePathRaw.(string) - if !ok { - return nil, errors.New("could not convert 'token_file_path' config value to string") - } - if a.tokenFilePath == "" { - return nil, errors.New("'token_file_path' value is empty") - } - - return a, nil -} - -func (a *tokenFileMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { - token, err := os.ReadFile(a.tokenFilePath) - if err != nil { - if a.cachedToken == "" { - return "", nil, nil, fmt.Errorf("error reading token file and no cached token known: %w", err) - } - a.logger.Warn("error reading token file", "error", err) - } - if len(token) == 0 { - if a.cachedToken == "" { - return "", nil, nil, errors.New("token file empty and no cached token known") - } - a.logger.Warn("token file exists but read empty value, re-using cached value") - } else { - a.cachedToken = 
strings.TrimSpace(string(token)) - } - - // i.e. auth/token/lookup-self - return fmt.Sprintf("%s/lookup-self", a.mountPath), nil, map[string]interface{}{ - "token": a.cachedToken, - }, nil -} - -func (a *tokenFileMethod) NewCreds() chan struct{} { - return nil -} - -func (a *tokenFileMethod) CredSuccess() { -} - -func (a *tokenFileMethod) Shutdown() { -} diff --git a/command/agentproxyshared/auth/token-file/token_file_test.go b/command/agentproxyshared/auth/token-file/token_file_test.go deleted file mode 100644 index eb89fc02350e1..0000000000000 --- a/command/agentproxyshared/auth/token-file/token_file_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package token_file - -import ( - "os" - "path/filepath" - "testing" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -func TestNewTokenFileAuthMethodEmptyConfig(t *testing.T) { - logger := logging.NewVaultLogger(log.Trace) - _, err := NewTokenFileAuthMethod(&auth.AuthConfig{ - Logger: logger.Named("auth.method"), - Config: map[string]interface{}{}, - }) - if err == nil { - t.Fatal("Expected error due to empty config") - } -} - -func TestNewTokenFileEmptyFilePath(t *testing.T) { - logger := logging.NewVaultLogger(log.Trace) - _, err := NewTokenFileAuthMethod(&auth.AuthConfig{ - Logger: logger.Named("auth.method"), - Config: map[string]interface{}{ - "token_file_path": "", - }, - }) - if err == nil { - t.Fatalf("Expected error when giving empty file path") - } -} - -func TestNewTokenFileAuthenticate(t *testing.T) { - tokenFile, err := os.Create(filepath.Join(t.TempDir(), "token_file")) - tokenFileContents := "super-secret-token" - if err != nil { - t.Fatal(err) - } - tokenFileName := tokenFile.Name() - tokenFile.Close() // WriteFile doesn't need it open - os.WriteFile(tokenFileName, []byte(tokenFileContents), 0o666) - defer os.Remove(tokenFileName) - - 
logger := logging.NewVaultLogger(log.Trace) - am, err := NewTokenFileAuthMethod(&auth.AuthConfig{ - Logger: logger.Named("auth.method"), - Config: map[string]interface{}{ - "token_file_path": tokenFileName, - }, - }) - if err != nil { - t.Fatal(err) - } - - path, headers, data, err := am.Authenticate(nil, nil) - if err != nil { - t.Fatal(err) - } - if path != "auth/token/lookup-self" { - t.Fatalf("Incorrect path, was %s", path) - } - if headers != nil { - t.Fatalf("Expected no headers, instead got %v", headers) - } - if data == nil { - t.Fatal("Data was nil") - } - tokenDataFromAuthMethod := data["token"].(string) - if tokenDataFromAuthMethod != tokenFileContents { - t.Fatalf("Incorrect token file contents return by auth method, expected %s, got %s", tokenFileContents, tokenDataFromAuthMethod) - } - - _, err = os.Stat(tokenFileName) - if err != nil { - t.Fatal("Token file removed") - } -} diff --git a/command/agentproxyshared/cache/api_proxy.go b/command/agentproxyshared/cache/api_proxy.go deleted file mode 100644 index e03bc1570b3c6..0000000000000 --- a/command/agentproxyshared/cache/api_proxy.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "context" - "fmt" - gohttp "net/http" - "sync" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-retryablehttp" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/http" -) - -type EnforceConsistency int - -const ( - EnforceConsistencyNever EnforceConsistency = iota - EnforceConsistencyAlways -) - -type WhenInconsistentAction int - -const ( - WhenInconsistentFail WhenInconsistentAction = iota - WhenInconsistentRetry - WhenInconsistentForward -) - -// APIProxy is an implementation of the proxier interface that is used to -// forward the request to Vault and get the response. 
-type APIProxy struct { - client *api.Client - logger hclog.Logger - enforceConsistency EnforceConsistency - whenInconsistentAction WhenInconsistentAction - l sync.RWMutex - lastIndexStates []string - userAgentString string - userAgentStringFunction func(string) string -} - -var _ Proxier = &APIProxy{} - -type APIProxyConfig struct { - Client *api.Client - Logger hclog.Logger - EnforceConsistency EnforceConsistency - WhenInconsistentAction WhenInconsistentAction - // UserAgentString is used as the User Agent when the proxied client - // does not have a user agent of its own. - UserAgentString string - // UserAgentStringFunction is the function to transform the proxied client's - // user agent into one that includes Vault-specific information. - UserAgentStringFunction func(string) string -} - -func NewAPIProxy(config *APIProxyConfig) (Proxier, error) { - if config.Client == nil { - return nil, fmt.Errorf("nil API client") - } - return &APIProxy{ - client: config.Client, - logger: config.Logger, - enforceConsistency: config.EnforceConsistency, - whenInconsistentAction: config.WhenInconsistentAction, - userAgentString: config.UserAgentString, - userAgentStringFunction: config.UserAgentStringFunction, - }, nil -} - -func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - client, err := ap.client.Clone() - if err != nil { - return nil, err - } - client.SetToken(req.Token) - - // Derive and set a logger for the client - clientLogger := ap.logger.Named("client") - client.SetLogger(clientLogger) - - // http.Transport will transparently request gzip and decompress the response, but only if - // the client doesn't manually set the header. Removing any Accept-Encoding header allows the - // transparent compression to occur. - req.Request.Header.Del("Accept-Encoding") - - if req.Request.Header == nil { - req.Request.Header = make(gohttp.Header) - } - - // Set our User-Agent to be one indicating we are Vault Agent's API proxy. 
- // If the sending client had one, preserve it. - if req.Request.Header.Get("User-Agent") != "" { - initialUserAgent := req.Request.Header.Get("User-Agent") - req.Request.Header.Set("User-Agent", ap.userAgentStringFunction(initialUserAgent)) - } else { - req.Request.Header.Set("User-Agent", ap.userAgentString) - } - - client.SetHeaders(req.Request.Header) - - fwReq := client.NewRequest(req.Request.Method, req.Request.URL.Path) - fwReq.BodyBytes = req.RequestBody - - query := req.Request.URL.Query() - if len(query) != 0 { - fwReq.Params = query - } - - var newState string - manageState := ap.enforceConsistency == EnforceConsistencyAlways && - req.Request.Header.Get(http.VaultIndexHeaderName) == "" && - req.Request.Header.Get(http.VaultForwardHeaderName) == "" && - req.Request.Header.Get(http.VaultInconsistentHeaderName) == "" - - if manageState { - client = client.WithResponseCallbacks(api.RecordState(&newState)) - ap.l.RLock() - lastStates := ap.lastIndexStates - ap.l.RUnlock() - if len(lastStates) != 0 { - client = client.WithRequestCallbacks(api.RequireState(lastStates...)) - switch ap.whenInconsistentAction { - case WhenInconsistentFail: - // In this mode we want to delegate handling of inconsistency - // failures to the external client talking to Agent. - client.SetCheckRetry(retryablehttp.DefaultRetryPolicy) - case WhenInconsistentRetry: - // In this mode we want to handle retries due to inconsistency - // internally. This is the default api.Client behaviour so - // we needn't do anything. 
- case WhenInconsistentForward: - fwReq.Headers.Set(http.VaultInconsistentHeaderName, http.VaultInconsistentForward) - } - } - } - - // Make the request to Vault and get the response - ap.logger.Info("forwarding request to Vault", "method", req.Request.Method, "path", req.Request.URL.Path) - - resp, err := client.RawRequestWithContext(ctx, fwReq) - if resp == nil && err != nil { - // We don't want to cache nil responses, so we simply return the error - return nil, err - } - - if newState != "" { - ap.l.Lock() - // We want to be using the "newest" states seen, but newer isn't well - // defined here. There can be two states S1 and S2 which aren't strictly ordered: - // S1 could have a newer localindex and S2 could have a newer replicatedindex. So - // we need to merge them. But we can't merge them because we wouldn't be able to - // "sign" the resulting header because we don't have access to the HMAC key that - // Vault uses to do so. So instead we compare any of the 0-2 saved states - // we have to the new header, keeping the newest 1-2 of these, and sending - // them to Vault to evaluate. - ap.lastIndexStates = api.MergeReplicationStates(ap.lastIndexStates, newState) - ap.l.Unlock() - } - - // Before error checking from the request call, we'd want to initialize a SendResponse to - // potentially return - sendResponse, newErr := NewSendResponse(resp, nil) - if newErr != nil { - return nil, newErr - } - - // Bubble back the api.Response as well for error checking/handling at the handler layer. - return sendResponse, err -} diff --git a/command/agentproxyshared/cache/api_proxy_test.go b/command/agentproxyshared/cache/api_proxy_test.go deleted file mode 100644 index 6671b17fdaa83..0000000000000 --- a/command/agentproxyshared/cache/api_proxy_test.go +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "context" - "fmt" - "net" - "net/http" - "os" - "testing" - "time" - - "github.com/hashicorp/vault/helper/useragent" - - "github.com/hashicorp/vault/builtin/credential/userpass" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -const policyAdmin = ` -path "*" { - capabilities = ["sudo", "create", "read", "update", "delete", "list"] -} -` - -func TestAPIProxy(t *testing.T) { - cleanup, client, _, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil) - defer cleanup() - - proxier, err := NewAPIProxy(&APIProxyConfig{ - Client: client, - Logger: logging.NewVaultLogger(hclog.Trace), - UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, - UserAgentString: useragent.ProxyAPIProxyString(), - }) - if err != nil { - t.Fatal(err) - } - - r := client.NewRequest("GET", "/v1/sys/health") - req, err := r.ToHTTP() - if err != nil { - t.Fatal(err) - } - - resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{ - Request: req, - }) - if err != nil { - t.Fatal(err) - } - - var result api.HealthResponse - err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result) - if err != nil { - t.Fatal(err) - } - - if !result.Initialized || result.Sealed || result.Standby { - t.Fatalf("bad sys/health response: %#v", result) - } -} - -func TestAPIProxyNoCache(t *testing.T) { - cleanup, client, _, _ := setupClusterAndAgentNoCache(namespace.RootContext(nil), t, nil) - defer cleanup() - - proxier, err := NewAPIProxy(&APIProxyConfig{ - Client: client, - Logger: logging.NewVaultLogger(hclog.Trace), - UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, - UserAgentString: 
useragent.ProxyAPIProxyString(), - }) - if err != nil { - t.Fatal(err) - } - - r := client.NewRequest("GET", "/v1/sys/health") - req, err := r.ToHTTP() - if err != nil { - t.Fatal(err) - } - - resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{ - Request: req, - }) - if err != nil { - t.Fatal(err) - } - - var result api.HealthResponse - err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result) - if err != nil { - t.Fatal(err) - } - - if !result.Initialized || result.Sealed || result.Standby { - t.Fatalf("bad sys/health response: %#v", result) - } -} - -func TestAPIProxy_queryParams(t *testing.T) { - // Set up an agent that points to a standby node for this particular test - // since it needs to proxy a /sys/health?standbyok=true request to a standby - cleanup, client, _, _ := setupClusterAndAgentOnStandby(namespace.RootContext(nil), t, nil) - defer cleanup() - - proxier, err := NewAPIProxy(&APIProxyConfig{ - Client: client, - Logger: logging.NewVaultLogger(hclog.Trace), - UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, - UserAgentString: useragent.ProxyAPIProxyString(), - }) - if err != nil { - t.Fatal(err) - } - - r := client.NewRequest("GET", "/v1/sys/health") - req, err := r.ToHTTP() - if err != nil { - t.Fatal(err) - } - - // Add a query parameter for testing - q := req.URL.Query() - q.Add("standbyok", "true") - req.URL.RawQuery = q.Encode() - - resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{ - Request: req, - }) - if err != nil { - t.Fatal(err) - } - - var result api.HealthResponse - err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result) - if err != nil { - t.Fatal(err) - } - - if !result.Initialized || result.Sealed || !result.Standby { - t.Fatalf("bad sys/health response: %#v", result) - } - - if resp.Response.StatusCode != http.StatusOK { - t.Fatalf("exptected standby to return 200, got: %v", resp.Response.StatusCode) - } -} - -// setupClusterAndAgent is a helper func used to set up a 
test cluster and -// caching agent against the active node. It returns a cleanup func that should -// be deferred immediately along with two clients, one for direct cluster -// communication and another to talk to the caching agent. -func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) { - return setupClusterAndAgentCommon(ctx, t, coreConfig, false, true) -} - -// setupClusterAndAgentNoCache is a helper func used to set up a test cluster and -// proxying agent against the active node. It returns a cleanup func that should -// be deferred immediately along with two clients, one for direct cluster -// communication and another to talk to the caching agent. -func setupClusterAndAgentNoCache(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) { - return setupClusterAndAgentCommon(ctx, t, coreConfig, false, false) -} - -// setupClusterAndAgentOnStandby is a helper func used to set up a test cluster -// and caching agent against a standby node. It returns a cleanup func that -// should be deferred immediately along with two clients, one for direct cluster -// communication and another to talk to the caching agent. 
-func setupClusterAndAgentOnStandby(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) { - return setupClusterAndAgentCommon(ctx, t, coreConfig, true, true) -} - -func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig, onStandby bool, useCache bool) (func(), *api.Client, *api.Client, *LeaseCache) { - t.Helper() - - if ctx == nil { - ctx = context.Background() - } - - // Handle sane defaults - if coreConfig == nil { - coreConfig = &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: logging.NewVaultLogger(hclog.Trace), - } - } - - // Always set up the userpass backend since we use that to generate an admin - // token for the client that will make proxied requests to through the agent. - if coreConfig.CredentialBackends == nil || coreConfig.CredentialBackends["userpass"] == nil { - coreConfig.CredentialBackends = map[string]logical.Factory{ - "userpass": userpass.Factory, - } - } - - // Init new test cluster - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - - cores := cluster.Cores - vault.TestWaitActive(t, cores[0].Core) - - activeClient := cores[0].Client - standbyClient := cores[1].Client - - // clienToUse is the client for the agent to point to. - clienToUse := activeClient - if onStandby { - clienToUse = standbyClient - } - - // Add an admin policy - if err := activeClient.Sys().PutPolicy("admin", policyAdmin); err != nil { - t.Fatal(err) - } - - // Set up the userpass auth backend and an admin user. Used for getting a token - // for the agent later down in this func. 
- err := activeClient.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ - Type: "userpass", - }) - if err != nil { - t.Fatal(err) - } - - _, err = activeClient.Logical().Write("auth/userpass/users/foo", map[string]interface{}{ - "password": "bar", - "policies": []string{"admin"}, - }) - if err != nil { - t.Fatal(err) - } - - // Set up env vars for agent consumption - origEnvVaultAddress := os.Getenv(api.EnvVaultAddress) - os.Setenv(api.EnvVaultAddress, clienToUse.Address()) - - origEnvVaultCACert := os.Getenv(api.EnvVaultCACert) - os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir)) - - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - apiProxyLogger := logging.NewVaultLogger(hclog.Trace).Named("apiproxy") - - // Create the API proxier - apiProxy, err := NewAPIProxy(&APIProxyConfig{ - Client: clienToUse, - Logger: apiProxyLogger, - UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, - UserAgentString: useragent.ProxyAPIProxyString(), - }) - if err != nil { - t.Fatal(err) - } - - // Create a muxer and add paths relevant for the lease cache layer and API proxy layer - mux := http.NewServeMux() - - var leaseCache *LeaseCache - if useCache { - cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") - - // Create the lease cache proxier and set its underlying proxier to - // the API proxier. 
- leaseCache, err = NewLeaseCache(&LeaseCacheConfig{ - Client: clienToUse, - BaseContext: ctx, - Proxier: apiProxy, - Logger: cacheLogger.Named("leasecache"), - }) - if err != nil { - t.Fatal(err) - } - - mux.Handle("/agent/v1/cache-clear", leaseCache.HandleCacheClear(ctx)) - - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, nil, true)) - } else { - mux.Handle("/", ProxyHandler(ctx, apiProxyLogger, apiProxy, nil, true)) - } - - server := &http.Server{ - Handler: mux, - ReadHeaderTimeout: 10 * time.Second, - ReadTimeout: 30 * time.Second, - IdleTimeout: 5 * time.Minute, - ErrorLog: apiProxyLogger.StandardLogger(nil), - } - go server.Serve(listener) - - // testClient is the client that is used to talk to the agent for proxying/caching behavior. - testClient, err := activeClient.Clone() - if err != nil { - t.Fatal(err) - } - - if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { - t.Fatal(err) - } - - // Login via userpass method to derive a managed token. Set that token as the - // testClient's token - resp, err := testClient.Logical().Write("auth/userpass/login/foo", map[string]interface{}{ - "password": "bar", - }) - if err != nil { - t.Fatal(err) - } - testClient.SetToken(resp.Auth.ClientToken) - - cleanup := func() { - // We wait for a tiny bit for things such as agent renewal to exit properly - time.Sleep(50 * time.Millisecond) - - cluster.Cleanup() - os.Setenv(api.EnvVaultAddress, origEnvVaultAddress) - os.Setenv(api.EnvVaultCACert, origEnvVaultCACert) - listener.Close() - } - - return cleanup, clienToUse, testClient, leaseCache -} diff --git a/command/agentproxyshared/cache/listener.go b/command/agentproxyshared/cache/listener.go deleted file mode 100644 index c8ed722191480..0000000000000 --- a/command/agentproxyshared/cache/listener.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "crypto/tls" - "fmt" - "net" - "strings" - - "github.com/hashicorp/go-secure-stdlib/reloadutil" - "github.com/hashicorp/vault/command/server" - "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/vault/internalshared/listenerutil" -) - -type ListenerBundle struct { - Listener net.Listener - TLSConfig *tls.Config - TLSReloadFunc reloadutil.ReloadFunc -} - -func StartListener(lnConfig *configutil.Listener) (*ListenerBundle, error) { - addr := lnConfig.Address - - var ln net.Listener - var err error - switch lnConfig.Type { - case "tcp": - if addr == "" { - addr = "127.0.0.1:8200" - } - - bindProto := "tcp" - // If they've passed 0.0.0.0, we only want to bind on IPv4 - // rather than golang's dual stack default - if strings.HasPrefix(addr, "0.0.0.0:") { - bindProto = "tcp4" - } - - ln, err = net.Listen(bindProto, addr) - if err != nil { - return nil, err - } - ln = &server.TCPKeepAliveListener{ln.(*net.TCPListener)} - - case "unix": - var uConfig *listenerutil.UnixSocketsConfig - if lnConfig.SocketMode != "" && - lnConfig.SocketUser != "" && - lnConfig.SocketGroup != "" { - uConfig = &listenerutil.UnixSocketsConfig{ - Mode: lnConfig.SocketMode, - User: lnConfig.SocketUser, - Group: lnConfig.SocketGroup, - } - } - ln, err = listenerutil.UnixSocketListener(addr, uConfig) - if err != nil { - return nil, err - } - - default: - return nil, fmt.Errorf("invalid listener type: %q", lnConfig.Type) - } - - props := map[string]string{"addr": ln.Addr().String()} - tlsConf, reloadFunc, err := listenerutil.TLSConfig(lnConfig, props, nil) - if err != nil { - return nil, err - } - if tlsConf != nil { - ln = tls.NewListener(ln, tlsConf) - } - - cfg := &ListenerBundle{ - Listener: ln, - TLSConfig: tlsConf, - TLSReloadFunc: reloadFunc, - } - - return cfg, nil -} diff --git a/command/agentproxyshared/cache/proxy.go b/command/agentproxyshared/cache/proxy.go deleted file mode 100644 index 
4dcd1803389fb..0000000000000 --- a/command/agentproxyshared/cache/proxy.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "bytes" - "context" - "io" - "net/http" - "time" - - "github.com/hashicorp/vault/api" -) - -// SendRequest is the input for Proxier.Send. -type SendRequest struct { - Token string - Request *http.Request - - // RequestBody is the stored body bytes from Request.Body. It is set here to - // avoid reading and re-setting the stream multiple times. - RequestBody []byte -} - -// SendResponse is the output from Proxier.Send. -type SendResponse struct { - Response *api.Response - - // ResponseBody is the stored body bytes from Response.Body. It is set here to - // avoid reading and re-setting the stream multiple times. - ResponseBody []byte - CacheMeta *CacheMeta -} - -// CacheMeta contains metadata information about the response, -// such as whether it was a cache hit or miss, and the age of the -// cached entry. -type CacheMeta struct { - Hit bool - Age time.Duration -} - -// Proxier is the interface implemented by different components that are -// responsible for performing specific tasks, such as caching and proxying. All -// these tasks combined together would serve the request received by the agent. -type Proxier interface { - Send(ctx context.Context, req *SendRequest) (*SendResponse, error) -} - -// NewSendResponse creates a new SendResponse and takes care of initializing its -// fields properly. -func NewSendResponse(apiResponse *api.Response, responseBody []byte) (*SendResponse, error) { - resp := &SendResponse{ - Response: apiResponse, - CacheMeta: &CacheMeta{}, - } - - // If a response body is separately provided we set that as the SendResponse.ResponseBody, - // otherwise we will do an ioutil.ReadAll to extract the response body from apiResponse. 
- switch { - case len(responseBody) > 0: - resp.ResponseBody = responseBody - case apiResponse.Body != nil: - respBody, err := io.ReadAll(apiResponse.Body) - if err != nil { - return nil, err - } - // Close the old body - apiResponse.Body.Close() - - // Re-set the response body after reading from the Reader - apiResponse.Body = io.NopCloser(bytes.NewReader(respBody)) - - resp.ResponseBody = respBody - } - - return resp, nil -} diff --git a/command/agentproxyshared/cache/testing.go b/command/agentproxyshared/cache/testing.go deleted file mode 100644 index 9fe9e6f1345c0..0000000000000 --- a/command/agentproxyshared/cache/testing.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "math/rand" - "net/http" - "strings" - "time" - - "github.com/hashicorp/vault/helper/useragent" - - "github.com/hashicorp/vault/api" -) - -// mockProxier is a mock implementation of the Proxier interface, used for testing purposes. -// The mock will return the provided responses every time it reaches its Send method, up to -// the last provided response. This lets tests control what the next/underlying Proxier layer -// might expect to return. 
-type mockProxier struct { - proxiedResponses []*SendResponse - responseIndex int -} - -func NewMockProxier(responses []*SendResponse) *mockProxier { - return &mockProxier{ - proxiedResponses: responses, - } -} - -func (p *mockProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - if p.responseIndex >= len(p.proxiedResponses) { - return nil, fmt.Errorf("index out of bounds: responseIndex = %d, responses = %d", p.responseIndex, len(p.proxiedResponses)) - } - resp := p.proxiedResponses[p.responseIndex] - - p.responseIndex++ - - return resp, nil -} - -func (p *mockProxier) ResponseIndex() int { - return p.responseIndex -} - -func newTestSendResponse(status int, body string) *SendResponse { - headers := make(http.Header) - headers.Add("User-Agent", useragent.AgentProxyString()) - resp := &SendResponse{ - Response: &api.Response{ - Response: &http.Response{ - StatusCode: status, - Header: headers, - }, - }, - } - resp.Response.Header.Set("Date", time.Now().Format(http.TimeFormat)) - - if body != "" { - resp.Response.Body = ioutil.NopCloser(strings.NewReader(body)) - resp.ResponseBody = []byte(body) - } - - if json.Valid([]byte(body)) { - resp.Response.Header.Set("content-type", "application/json") - } - - return resp -} - -type mockTokenVerifierProxier struct { - currentToken string -} - -func (p *mockTokenVerifierProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - p.currentToken = req.Token - resp := newTestSendResponse(http.StatusOK, - `{"data": {"id": "`+p.currentToken+`"}}`) - - return resp, nil -} - -func (p *mockTokenVerifierProxier) GetCurrentRequestToken() string { - return p.currentToken -} - -type mockDelayProxier struct { - cacheableResp bool - delay int -} - -func (p *mockDelayProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - if p.delay > 0 { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(time.Duration(p.delay) * time.Millisecond): - } - } - - // 
If this is a cacheable response, we return a unique response every time - if p.cacheableResp { - rand.Seed(time.Now().Unix()) - s := fmt.Sprintf(`{"lease_id": "%d", "renewable": true, "data": {"foo": "bar"}}`, rand.Int()) - return newTestSendResponse(http.StatusOK, s), nil - } - - return newTestSendResponse(http.StatusOK, `{"value": "output"}`), nil -} diff --git a/command/agentproxyshared/helpers.go b/command/agentproxyshared/helpers.go deleted file mode 100644 index d1487174bdf1a..0000000000000 --- a/command/agentproxyshared/helpers.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package agentproxyshared - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - "github.com/hashicorp/vault/command/agentproxyshared/auth/alicloud" - "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" - "github.com/hashicorp/vault/command/agentproxyshared/auth/aws" - "github.com/hashicorp/vault/command/agentproxyshared/auth/azure" - "github.com/hashicorp/vault/command/agentproxyshared/auth/cert" - "github.com/hashicorp/vault/command/agentproxyshared/auth/cf" - "github.com/hashicorp/vault/command/agentproxyshared/auth/gcp" - "github.com/hashicorp/vault/command/agentproxyshared/auth/jwt" - "github.com/hashicorp/vault/command/agentproxyshared/auth/kerberos" - "github.com/hashicorp/vault/command/agentproxyshared/auth/kubernetes" - "github.com/hashicorp/vault/command/agentproxyshared/auth/oci" - token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file" - "github.com/hashicorp/vault/command/agentproxyshared/cache" - "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" - "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" -) - -// GetAutoAuthMethodFromConfig 
Calls the appropriate NewAutoAuthMethod function, initializing -// the auto-auth method, based on the auto-auth method type. Returns an error if one happens or -// the method type is invalid. -func GetAutoAuthMethodFromConfig(autoAuthMethodType string, authConfig *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) { - switch autoAuthMethodType { - case "alicloud": - return alicloud.NewAliCloudAuthMethod(authConfig) - case "aws": - return aws.NewAWSAuthMethod(authConfig) - case "azure": - return azure.NewAzureAuthMethod(authConfig) - case "cert": - return cert.NewCertAuthMethod(authConfig) - case "cf": - return cf.NewCFAuthMethod(authConfig) - case "gcp": - return gcp.NewGCPAuthMethod(authConfig) - case "jwt": - return jwt.NewJWTAuthMethod(authConfig) - case "kerberos": - return kerberos.NewKerberosAuthMethod(authConfig) - case "kubernetes": - return kubernetes.NewKubernetesAuthMethod(authConfig) - case "approle": - return approle.NewApproleAuthMethod(authConfig) - case "oci": - return oci.NewOCIAuthMethod(authConfig, vaultAddress) - case "token_file": - return token_file.NewTokenFileAuthMethod(authConfig) - case "pcf": // Deprecated. 
- return cf.NewCFAuthMethod(authConfig) - default: - return nil, errors.New(fmt.Sprintf("unknown auth method %q", autoAuthMethodType)) - } -} - -// PersistConfig contains configuration needed for persistent caching -type PersistConfig struct { - Type string - Path string `hcl:"path"` - KeepAfterImport bool `hcl:"keep_after_import"` - ExitOnErr bool `hcl:"exit_on_err"` - ServiceAccountTokenFile string `hcl:"service_account_token_file"` -} - -// AddPersistentStorageToLeaseCache adds persistence to a lease cache, based on a given PersistConfig -// Returns a close function to be deferred and the old token, if found, or an error -func AddPersistentStorageToLeaseCache(ctx context.Context, leaseCache *cache.LeaseCache, persistConfig *PersistConfig, logger log.Logger) (func() error, string, error) { - if persistConfig == nil { - return nil, "", errors.New("persist config was nil") - } - - if persistConfig.Path == "" { - return nil, "", errors.New("must specify persistent cache path") - } - - // Set AAD based on key protection type - var aad string - var err error - switch persistConfig.Type { - case "kubernetes": - aad, err = getServiceAccountJWT(persistConfig.ServiceAccountTokenFile) - if err != nil { - tokenFileName := persistConfig.ServiceAccountTokenFile - if len(tokenFileName) == 0 { - tokenFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token" - } - return nil, "", fmt.Errorf("failed to read service account token from %s: %w", tokenFileName, err) - } - default: - return nil, "", fmt.Errorf("persistent key protection type %q not supported", persistConfig.Type) - } - - // Check if bolt file exists already - dbFileExists, err := cacheboltdb.DBFileExists(persistConfig.Path) - if err != nil { - return nil, "", fmt.Errorf("failed to check if bolt file exists at path %s: %w", persistConfig.Path, err) - } - if dbFileExists { - // Open the bolt file, but wait to setup Encryption - ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: 
persistConfig.Path, - Logger: logger.Named("cacheboltdb"), - }) - if err != nil { - return nil, "", fmt.Errorf("error opening persistent cache %v", err) - } - - // Get the token from bolt for retrieving the encryption key, - // then setup encryption so that restore is possible - token, err := ps.GetRetrievalToken() - if err != nil { - return nil, "", fmt.Errorf("error getting retrieval token from persistent cache: %w", err) - } - - if err := ps.Close(); err != nil { - return nil, "", fmt.Errorf("failed to close persistent cache file after getting retrieval token: %w", err) - } - - km, err := keymanager.NewPassthroughKeyManager(ctx, token) - if err != nil { - return nil, "", fmt.Errorf("failed to configure persistence encryption for cache: %w", err) - } - - // Open the bolt file with the wrapper provided - ps, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: persistConfig.Path, - Logger: logger.Named("cacheboltdb"), - Wrapper: km.Wrapper(), - AAD: aad, - }) - if err != nil { - return nil, "", fmt.Errorf("error opening persistent cache with wrapper: %w", err) - } - - // Restore anything in the persistent cache to the memory cache - if err := leaseCache.Restore(ctx, ps); err != nil { - logger.Error(fmt.Sprintf("error restoring in-memory cache from persisted file: %v", err)) - if persistConfig.ExitOnErr { - return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true") - } - } - logger.Info("loaded memcache from persistent storage") - - // Check for previous auto-auth token - oldTokenBytes, err := ps.GetAutoAuthToken(ctx) - if err != nil { - logger.Error(fmt.Sprintf("error in fetching previous auto-auth token: %v", err)) - if persistConfig.ExitOnErr { - return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true") - } - } - var previousToken string - if len(oldTokenBytes) > 0 { - oldToken, err := cachememdb.Deserialize(oldTokenBytes) - if err != nil { - logger.Error(fmt.Sprintf("error in deserializing previous 
auto-auth token cache entryn: %v", err)) - if persistConfig.ExitOnErr { - return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true") - } - } - previousToken = oldToken.Token - } - - // If keep_after_import true, set persistent storage layer in - // leaseCache, else remove db file - if persistConfig.KeepAfterImport { - leaseCache.SetPersistentStorage(ps) - return ps.Close, previousToken, nil - } else { - if err := ps.Close(); err != nil { - logger.Warn(fmt.Sprintf("failed to close persistent cache file: %s", err)) - } - dbFile := filepath.Join(persistConfig.Path, cacheboltdb.DatabaseFileName) - if err := os.Remove(dbFile); err != nil { - logger.Error(fmt.Sprintf("failed to remove persistent storage file %s: %v", dbFile, err)) - if persistConfig.ExitOnErr { - return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true") - } - } - return nil, previousToken, nil - } - } else { - km, err := keymanager.NewPassthroughKeyManager(ctx, nil) - if err != nil { - return nil, "", fmt.Errorf("failed to configure persistence encryption for cache: %w", err) - } - ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: persistConfig.Path, - Logger: logger.Named("cacheboltdb"), - Wrapper: km.Wrapper(), - AAD: aad, - }) - if err != nil { - return nil, "", fmt.Errorf("error creating persistent cache: %w", err) - } - logger.Info("configured persistent storage", "path", persistConfig.Path) - - // Stash the key material in bolt - token, err := km.RetrievalToken(ctx) - if err != nil { - return nil, "", fmt.Errorf("error getting persistence key: %w", err) - } - if err := ps.StoreRetrievalToken(token); err != nil { - return nil, "", fmt.Errorf("error setting key in persistent cache: %w", err) - } - - leaseCache.SetPersistentStorage(ps) - return ps.Close, "", nil - } -} - -// getServiceAccountJWT attempts to read the service account JWT from the specified token file path. 
-// Defaults to using the Kubernetes default service account file path if token file path is empty. -func getServiceAccountJWT(tokenFile string) (string, error) { - if len(tokenFile) == 0 { - tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" - } - token, err := os.ReadFile(tokenFile) - if err != nil { - return "", err - } - return strings.TrimSpace(string(token)), nil -} diff --git a/command/agentproxyshared/helpers_test.go b/command/agentproxyshared/helpers_test.go deleted file mode 100644 index 24fdf1d9db533..0000000000000 --- a/command/agentproxyshared/helpers_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package agentproxyshared - -import ( - "context" - "os" - "testing" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/cache" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -func testNewLeaseCache(t *testing.T, responses []*cache.SendResponse) *cache.LeaseCache { - t.Helper() - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - lc, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{ - Client: client, - BaseContext: context.Background(), - Proxier: cache.NewMockProxier(responses), - Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), - }) - if err != nil { - t.Fatal(err) - } - return lc -} - -func populateTempFile(t *testing.T, name, contents string) *os.File { - t.Helper() - - file, err := os.CreateTemp(t.TempDir(), name) - if err != nil { - t.Fatal(err) - } - - _, err = file.WriteString(contents) - if err != nil { - t.Fatal(err) - } - - err = file.Close() - if err != nil { - t.Fatal(err) - } - - return file -} - -// Test_AddPersistentStorageToLeaseCache Tests that AddPersistentStorageToLeaseCache() correctly -// adds persistent storage to a lease cache -func Test_AddPersistentStorageToLeaseCache(t *testing.T) { - tempDir := 
t.TempDir() - serviceAccountTokenFile := populateTempFile(t, "proxy-config.hcl", "token") - - persistConfig := &PersistConfig{ - Type: "kubernetes", - Path: tempDir, - KeepAfterImport: false, - ExitOnErr: false, - ServiceAccountTokenFile: serviceAccountTokenFile.Name(), - } - - leaseCache := testNewLeaseCache(t, nil) - if leaseCache.PersistentStorage() != nil { - t.Fatal("persistent storage was available before ours was added") - } - - deferFunc, token, err := AddPersistentStorageToLeaseCache(context.Background(), leaseCache, persistConfig, logging.NewVaultLogger(hclog.Info)) - if err != nil { - t.Fatal(err) - } - - if leaseCache.PersistentStorage() == nil { - t.Fatal("persistent storage was not added") - } - - if token != "" { - t.Fatal("expected token to be empty") - } - - if deferFunc == nil { - t.Fatal("expected deferFunc to not be nil") - } -} diff --git a/command/agentproxyshared/sink/mock/mock_sink.go b/command/agentproxyshared/sink/mock/mock_sink.go deleted file mode 100644 index c39baf9c8b28f..0000000000000 --- a/command/agentproxyshared/sink/mock/mock_sink.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package mock - -import ( - "github.com/hashicorp/vault/command/agentproxyshared/sink" -) - -type mockSink struct { - token string -} - -func NewSink(token string) sink.Sink { - return &mockSink{ - token: token, - } -} - -func (m *mockSink) WriteToken(token string) error { - m.token = token - return nil -} - -func (m *mockSink) Token() string { - return m.token -} diff --git a/command/approle_concurrency_integ_test.go b/command/approle_concurrency_integ_test.go index 934f8b33fd455..5dbcce064c8dd 100644 --- a/command/approle_concurrency_integ_test.go +++ b/command/approle_concurrency_integ_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/audit.go b/command/audit.go index 606de73eef810..8acea78f36c49 100644 --- a/command/audit.go +++ b/command/audit.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/audit_disable.go b/command/audit_disable.go index ef9288b0859c2..ddebfcbeda1b1 100644 --- a/command/audit_disable.go +++ b/command/audit_disable.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/audit_disable_test.go b/command/audit_disable_test.go index 44b782f4dbe6b..0a7e8e4dcd99e 100644 --- a/command/audit_disable_test.go +++ b/command/audit_disable_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/audit_enable.go b/command/audit_enable.go index 652c3c27efefc..9ed7d5d30694a 100644 --- a/command/audit_enable.go +++ b/command/audit_enable.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/audit_enable_test.go b/command/audit_enable_test.go index e7dc4ae786044..7d19f086ad586 100644 --- a/command/audit_enable_test.go +++ b/command/audit_enable_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/audit_list.go b/command/audit_list.go index e5af8525eef3e..a2dde2180a844 100644 --- a/command/audit_list.go +++ b/command/audit_list.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/audit_list_test.go b/command/audit_list_test.go index c2e6eacf47b63..9cbb0af5eee31 100644 --- a/command/audit_list_test.go +++ b/command/audit_list_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth.go b/command/auth.go index e2bdb81c1ea2e..3c47b2b889c46 100644 --- a/command/auth.go +++ b/command/auth.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_disable.go b/command/auth_disable.go index 735103bdab601..773486107a513 100644 --- a/command/auth_disable.go +++ b/command/auth_disable.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_disable_test.go b/command/auth_disable_test.go index 385bc4ec73ac9..51419b86637a9 100644 --- a/command/auth_disable_test.go +++ b/command/auth_disable_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_enable.go b/command/auth_enable.go index 7c7af550dbadf..bddf11cb56aa5 100644 --- a/command/auth_enable.go +++ b/command/auth_enable.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -11,6 +8,7 @@ import ( "time" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -154,7 +152,7 @@ func (c *AuthEnableCommand) Flags() *FlagSets { f.StringVar(&StringVar{ Name: "plugin-name", Target: &c.flagPluginName, - Completion: c.PredictVaultPlugins(api.PluginTypeCredential), + Completion: c.PredictVaultPlugins(consts.PluginTypeCredential), Usage: "Name of the auth method plugin. This plugin name must already " + "exist in the Vault server's plugin catalog.", }) diff --git a/command/auth_enable_test.go b/command/auth_enable_test.go index 4a4292ce80b5b..211c1da7ddb4a 100644 --- a/command/auth_enable_test.go +++ b/command/auth_enable_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -52,6 +49,18 @@ func TestAuthEnableCommand_Run(t *testing.T) { "", 2, }, + { + "deprecated builtin with standard mount", + []string{"app-id"}, + "mount entry associated with pending removal builtin", + 2, + }, + { + "deprecated builtin with different mount", + []string{"-path=/tmp", "app-id"}, + "mount entry associated with pending removal builtin", + 2, + }, } for _, tc := range cases { diff --git a/command/auth_help.go b/command/auth_help.go index 34b6b9ffa930b..41ea7be5f5d11 100644 --- a/command/auth_help.go +++ b/command/auth_help.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_help_test.go b/command/auth_help_test.go index a83695ee3e8f7..0c0d36f168f2e 100644 --- a/command/auth_help_test.go +++ b/command/auth_help_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_list.go b/command/auth_list.go index 25103a14dc5ac..f8b53d1518f30 100644 --- a/command/auth_list.go +++ b/command/auth_list.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_list_test.go b/command/auth_list_test.go index 2e96f9f2ca078..decf6e9b06f0a 100644 --- a/command/auth_list_test.go +++ b/command/auth_list_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_move.go b/command/auth_move.go index 2af5ab65130a1..9e591ba64f0de 100644 --- a/command/auth_move.go +++ b/command/auth_move.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_move_test.go b/command/auth_move_test.go index 877afd27bfee5..035938efe5aa5 100644 --- a/command/auth_move_test.go +++ b/command/auth_move_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_test.go b/command/auth_test.go index dd8abb07f7e62..f0fd5d065d8bc 100644 --- a/command/auth_test.go +++ b/command/auth_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/auth_tune.go b/command/auth_tune.go index a7a09797f9067..de4c198273147 100644 --- a/command/auth_tune.go +++ b/command/auth_tune.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -23,22 +20,18 @@ var ( type AuthTuneCommand struct { *BaseCommand - flagAuditNonHMACRequestKeys []string - flagAuditNonHMACResponseKeys []string - flagDefaultLeaseTTL time.Duration - flagDescription string - flagListingVisibility string - flagMaxLeaseTTL time.Duration - flagPassthroughRequestHeaders []string - flagAllowedResponseHeaders []string - flagOptions map[string]string - flagTokenType string - flagVersion int - flagPluginVersion string - flagUserLockoutThreshold uint - flagUserLockoutDuration time.Duration - flagUserLockoutCounterResetDuration time.Duration - flagUserLockoutDisable bool + flagAuditNonHMACRequestKeys []string + flagAuditNonHMACResponseKeys []string + flagDefaultLeaseTTL time.Duration + flagDescription string + flagListingVisibility string + flagMaxLeaseTTL time.Duration + flagPassthroughRequestHeaders []string + flagAllowedResponseHeaders []string + flagOptions map[string]string + flagTokenType string + flagVersion int + flagPluginVersion string } func (c *AuthTuneCommand) Synopsis() string { @@ -152,41 +145,6 @@ func (c *AuthTuneCommand) Flags() *FlagSets { Usage: 
"Select the version of the auth method to run. Not supported by all auth methods.", }) - f.UintVar(&UintVar{ - Name: flagNameUserLockoutThreshold, - Target: &c.flagUserLockoutThreshold, - Usage: "The threshold for user lockout for this auth method. If unspecified, this " + - "defaults to the Vault server's globally configured user lockout threshold, " + - "or a previously configured value for the auth method.", - }) - - f.DurationVar(&DurationVar{ - Name: flagNameUserLockoutDuration, - Target: &c.flagUserLockoutDuration, - Completion: complete.PredictAnything, - Usage: "The user lockout duration for this auth method. If unspecified, this " + - "defaults to the Vault server's globally configured user lockout duration, " + - "or a previously configured value for the auth method.", - }) - - f.DurationVar(&DurationVar{ - Name: flagNameUserLockoutCounterResetDuration, - Target: &c.flagUserLockoutCounterResetDuration, - Completion: complete.PredictAnything, - Usage: "The user lockout counter reset duration for this auth method. If unspecified, this " + - "defaults to the Vault server's globally configured user lockout counter reset duration, " + - "or a previously configured value for the auth method.", - }) - - f.BoolVar(&BoolVar{ - Name: flagNameUserLockoutDisable, - Target: &c.flagUserLockoutDisable, - Default: false, - Usage: "Disable user lockout for this auth method. 
If unspecified, this " + - "defaults to the Vault server's globally configured user lockout disable, " + - "or a previously configured value for the auth method.", - }) - f.StringVar(&StringVar{ Name: flagNamePluginVersion, Target: &c.flagPluginVersion, @@ -272,24 +230,6 @@ func (c *AuthTuneCommand) Run(args []string) int { if fl.Name == flagNameTokenType { mountConfigInput.TokenType = c.flagTokenType } - switch fl.Name { - case flagNameUserLockoutThreshold, flagNameUserLockoutDuration, flagNameUserLockoutCounterResetDuration, flagNameUserLockoutDisable: - if mountConfigInput.UserLockoutConfig == nil { - mountConfigInput.UserLockoutConfig = &api.UserLockoutConfigInput{} - } - } - if fl.Name == flagNameUserLockoutThreshold { - mountConfigInput.UserLockoutConfig.LockoutThreshold = strconv.FormatUint(uint64(c.flagUserLockoutThreshold), 10) - } - if fl.Name == flagNameUserLockoutDuration { - mountConfigInput.UserLockoutConfig.LockoutDuration = ttlToAPI(c.flagUserLockoutDuration) - } - if fl.Name == flagNameUserLockoutCounterResetDuration { - mountConfigInput.UserLockoutConfig.LockoutCounterResetDuration = ttlToAPI(c.flagUserLockoutCounterResetDuration) - } - if fl.Name == flagNameUserLockoutDisable { - mountConfigInput.UserLockoutConfig.DisableLockout = &c.flagUserLockoutDisable - } if fl.Name == flagNamePluginVersion { mountConfigInput.PluginVersion = c.flagPluginVersion diff --git a/command/auth_tune_test.go b/command/auth_tune_test.go index aabcd8396020e..635a70f44be85 100644 --- a/command/auth_tune_test.go +++ b/command/auth_tune_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -9,7 +6,8 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -78,7 +76,7 @@ func TestAuthTuneCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Run("flags_all", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) @@ -107,7 +105,7 @@ func TestAuthTuneCommand_Run(t *testing.T) { t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) } - _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "userpass", api.PluginTypeCredential) + _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "userpass", consts.PluginTypeCredential) code := cmd.Run([]string{ "-description", "new description", diff --git a/command/base.go b/command/base.go index 641fcb34e9458..62560ccf58067 100644 --- a/command/base.go +++ b/command/base.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -42,20 +39,20 @@ type BaseCommand struct { flags *FlagSets flagsOnce sync.Once - flagAddress string - flagAgentProxyAddress string - flagCACert string - flagCAPath string - flagClientCert string - flagClientKey string - flagNamespace string - flagNS string - flagPolicyOverride bool - flagTLSServerName string - flagTLSSkipVerify bool - flagDisableRedirects bool - flagWrapTTL time.Duration - flagUnlockKey string + flagAddress string + flagAgentAddress string + flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagNamespace string + flagNS string + flagPolicyOverride bool + flagTLSServerName string + flagTLSSkipVerify bool + flagDisableRedirects bool + flagWrapTTL time.Duration + flagUnlockKey string flagFormat string flagField string @@ -90,8 +87,8 @@ func (c *BaseCommand) Client() (*api.Client, error) { if c.flagAddress != "" { config.Address = c.flagAddress } - if c.flagAgentProxyAddress != "" { - config.Address = c.flagAgentProxyAddress + if c.flagAgentAddress != "" { + config.Address = c.flagAgentAddress } if c.flagOutputCurlString { @@ -330,7 +327,7 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets { agentAddrStringVar := &StringVar{ Name: "agent-address", - Target: &c.flagAgentProxyAddress, + Target: &c.flagAgentAddress, EnvVar: api.EnvVaultAgentAddr, Completion: complete.PredictAnything, Usage: "Address of the Agent.", @@ -521,10 +518,9 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets { Target: &c.flagFormat, Default: "table", EnvVar: EnvVaultFormat, - Completion: complete.PredictSet("table", "json", "yaml", "pretty", "raw"), + Completion: complete.PredictSet("table", "json", "yaml", "pretty"), Usage: `Print the output in the given format. Valid formats - are "table", "json", "yaml", or "pretty". 
"raw" is allowed - for 'vault read' operations only.`, + are "table", "json", "yaml", or "pretty".`, }) } @@ -587,7 +583,6 @@ func (f *FlagSets) Completions() complete.Flags { type ( ParseOptions interface{} - ParseOptionAllowRawFormat bool DisableDisplayFlagWarning bool ) @@ -609,12 +604,7 @@ func (f *FlagSets) Parse(args []string, opts ...ParseOptions) error { } } - if err != nil { - return err - } - - // Now surface any other errors. - return generateFlagErrors(f, opts...) + return err } // Parsed reports whether the command-line flags have been parsed. diff --git a/command/base_flags.go b/command/base_flags.go index 3fe069fbb457a..4944454b78668 100644 --- a/command/base_flags.go +++ b/command/base_flags.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -252,14 +249,14 @@ func (i *intValue) Set(s string) error { return err } if v >= math.MinInt && v <= math.MaxInt { - *i.target = v + *i.target = int(v) return nil } return fmt.Errorf("Incorrect conversion of a 64-bit integer to a lower bit size. Value %d is not within bounds for int32", v) } -func (i *intValue) Get() interface{} { return *i.target } -func (i *intValue) String() string { return strconv.Itoa(*i.target) } +func (i *intValue) Get() interface{} { return int(*i.target) } +func (i *intValue) String() string { return strconv.Itoa(int(*i.target)) } func (i *intValue) Example() string { return "int" } func (i *intValue) Hidden() bool { return i.hidden } diff --git a/command/base_flags_test.go b/command/base_flags_test.go index 580e163de1f93..39777d24a9537 100644 --- a/command/base_flags_test.go +++ b/command/base_flags_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/base_helpers.go b/command/base_helpers.go index 2595dc56a6a55..b2a9f8c7fbd28 100644 --- a/command/base_helpers.go +++ b/command/base_helpers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -325,20 +322,3 @@ func generateFlagWarnings(args []string) string { return "" } } - -func generateFlagErrors(f *FlagSets, opts ...ParseOptions) error { - if Format(f.ui) == "raw" { - canUseRaw := false - for _, opt := range opts { - if value, ok := opt.(ParseOptionAllowRawFormat); ok { - canUseRaw = bool(value) - } - } - - if !canUseRaw { - return fmt.Errorf("This command does not support the -format=raw option.") - } - } - - return nil -} diff --git a/command/base_helpers_test.go b/command/base_helpers_test.go index 50cd26441ffa7..dee93b4bf44a1 100644 --- a/command/base_helpers_test.go +++ b/command/base_helpers_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/base_predict.go b/command/base_predict.go index ee2a771c7a968..61cbe092d61da 100644 --- a/command/base_predict.go +++ b/command/base_predict.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -10,6 +7,7 @@ import ( "sync" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/posener/complete" ) @@ -157,7 +155,7 @@ func (b *BaseCommand) PredictVaultAuths() complete.Predictor { } // PredictVaultPlugins returns a predictor for installed plugins. -func (b *BaseCommand) PredictVaultPlugins(pluginTypes ...api.PluginType) complete.Predictor { +func (b *BaseCommand) PredictVaultPlugins(pluginTypes ...consts.PluginType) complete.Predictor { return NewPredict().VaultPlugins(pluginTypes...) 
} @@ -220,7 +218,7 @@ func (p *Predict) VaultAuths() complete.Predictor { // VaultPlugins returns a predictor for Vault's plugin catalog. This is a public // API for consumers, but you probably want BaseCommand.PredictVaultPlugins // instead. -func (p *Predict) VaultPlugins(pluginTypes ...api.PluginType) complete.Predictor { +func (p *Predict) VaultPlugins(pluginTypes ...consts.PluginType) complete.Predictor { filterFunc := func() []string { return p.plugins(pluginTypes...) } @@ -397,12 +395,12 @@ func (p *Predict) auths() []string { } // plugins returns a sorted list of the plugins in the catalog. -func (p *Predict) plugins(pluginTypes ...api.PluginType) []string { +func (p *Predict) plugins(pluginTypes ...consts.PluginType) []string { // This method's signature doesn't enforce that a pluginType must be passed in. // If it's not, it's likely the caller's intent is go get a list of all of them, // so let's help them out. if len(pluginTypes) == 0 { - pluginTypes = append(pluginTypes, api.PluginTypeUnknown) + pluginTypes = append(pluginTypes, consts.PluginTypeUnknown) } client := p.Client() @@ -413,7 +411,7 @@ func (p *Predict) plugins(pluginTypes ...api.PluginType) []string { var plugins []string pluginsAdded := make(map[string]bool) for _, pluginType := range pluginTypes { - result, err := client.Sys().ListPlugins(&api.ListPluginsInput{Type: api.PluginType(pluginType)}) + result, err := client.Sys().ListPlugins(&api.ListPluginsInput{Type: pluginType}) if err != nil { return nil } diff --git a/command/base_predict_test.go b/command/base_predict_test.go index 20af0f68810f9..644a366673d4e 100644 --- a/command/base_predict_test.go +++ b/command/base_predict_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -346,9 +343,11 @@ func TestPredict_Plugins(t *testing.T) { []string{ "ad", "alicloud", + "app-id", "approle", "aws", "azure", + "cassandra", "cassandra-database-plugin", "centrify", "cert", @@ -368,10 +367,13 @@ func TestPredict_Plugins(t *testing.T) { "kubernetes", "kv", "ldap", + "mongodb", "mongodb-database-plugin", "mongodbatlas", "mongodbatlas-database-plugin", + "mssql", "mssql-database-plugin", + "mysql", "mysql-aurora-database-plugin", "mysql-database-plugin", "mysql-legacy-database-plugin", @@ -383,6 +385,7 @@ func TestPredict_Plugins(t *testing.T) { "openldap", "pcf", // Deprecated. "pki", + "postgresql", "postgresql-database-plugin", "rabbitmq", "radius", @@ -436,7 +439,7 @@ func TestPredict_Plugins(t *testing.T) { } } if !reflect.DeepEqual(act, tc.exp) { - t.Errorf("expected: %q, got: %q, diff: %v", tc.exp, act, strutil.Difference(act, tc.exp, true)) + t.Errorf("expected:%q, got: %q", tc.exp, act) } }) } diff --git a/command/base_test.go b/command/base_test.go index af4f0a4d3b62a..b3f75e0eb05d7 100644 --- a/command/base_test.go +++ b/command/base_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/command_test.go b/command/command_test.go index f1a5269b9f5fb..f4b8c7fd251ad 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -25,7 +22,6 @@ import ( "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical/inmem" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal" "github.com/mitchellh/cli" auditFile "github.com/hashicorp/vault/builtin/audit/file" @@ -71,54 +67,10 @@ func testVaultServer(tb testing.TB) (*api.Client, func()) { return client, closer } -func testVaultServerWithSecrets(ctx context.Context, tb testing.TB) (*api.Client, func()) { - tb.Helper() - - client, _, closer := testVaultServerUnseal(tb) - - // enable kv-v1 backend - if err := client.Sys().Mount("kv-v1/", &api.MountInput{ - Type: "kv-v1", - }); err != nil { - tb.Fatal(err) - } - - // enable kv-v2 backend - if err := client.Sys().Mount("kv-v2/", &api.MountInput{ - Type: "kv-v2", - }); err != nil { - tb.Fatal(err) - } - - // populate dummy secrets - for _, path := range []string{ - "foo", - "app-1/foo", - "app-1/bar", - "app-1/nested/baz", - } { - if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{ - "user": "test", - "password": "Hashi123", - }); err != nil { - tb.Fatal(err) - } - - if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{ - "user": "test", - "password": "Hashi123", - }); err != nil { - tb.Fatal(err) - } - } - - return client, closer -} - func testVaultServerWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, func()) { tb.Helper() - client, _, closer := testVaultServerUnsealWithKVVersionWithSeal(tb, kvVersion, nil) + client, _, closer := testVaultServerUnsealWithKVVersion(tb, kvVersion) return client, closer } @@ -137,24 +89,13 @@ func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) { return client, closer } -// testVaultServerAutoUnseal creates a test vault cluster and sets it up with auto unseal -// the function returns a client, the recovery keys, and a closer function -func testVaultServerAutoUnseal(tb testing.TB) (*api.Client, 
[]string, func()) { - testSeal, _ := seal.NewTestSeal(nil) - autoSeal, err := vault.NewAutoSeal(testSeal) - if err != nil { - tb.Fatal("unable to create autoseal", err) - } - return testVaultServerUnsealWithKVVersionWithSeal(tb, "1", autoSeal) -} - // testVaultServerUnseal creates a test vault cluster and returns a configured // API client, list of unseal keys (as strings), and a closer function. func testVaultServerUnseal(tb testing.TB) (*api.Client, []string, func()) { - return testVaultServerUnsealWithKVVersionWithSeal(tb, "1", nil) + return testVaultServerUnsealWithKVVersion(tb, "1") } -func testVaultServerUnsealWithKVVersionWithSeal(tb testing.TB, kvVersion string, seal vault.Seal) (*api.Client, []string, func()) { +func testVaultServerUnsealWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, []string, func()) { tb.Helper() logger := log.NewInterceptLogger(&log.LoggerOptions{ Output: log.DefaultOutput, @@ -170,7 +111,6 @@ func testVaultServerUnsealWithKVVersionWithSeal(tb testing.TB, kvVersion string, AuditBackends: defaultVaultAuditBackends, LogicalBackends: defaultVaultLogicalBackends, BuiltinRegistry: builtinplugins.Registry, - Seal: seal, }, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, NumCores: 1, @@ -204,8 +144,7 @@ func testVaultServerCoreConfig(tb testing.TB, coreConfig *vault.CoreConfig) (*ap } // testVaultServerCoreConfig creates a new vault cluster with the given core -// configuration. This is a lower-level test helper. If the seal config supports recovery keys, then -// recovery keys are returned. Otherwise, unseal keys are returned +// configuration. This is a lower-level test helper. 
func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConfig, opts *vault.TestClusterOptions) (*api.Client, []string, func()) { tb.Helper() @@ -220,24 +159,14 @@ func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConf client := cluster.Cores[0].Client client.SetToken(cluster.RootToken) - var keys [][]byte - if coreConfig.Seal != nil && coreConfig.Seal.RecoveryKeySupported() { - keys = cluster.RecoveryKeys - } else { - keys = cluster.BarrierKeys + // Convert the unseal keys to base64 encoded, since these are how the user + // will get them. + unsealKeys := make([]string, len(cluster.BarrierKeys)) + for i := range unsealKeys { + unsealKeys[i] = base64.StdEncoding.EncodeToString(cluster.BarrierKeys[i]) } - return client, encodeKeys(keys), cluster.Cleanup -} - -// Convert the unseal keys to base64 encoded, since these are how the user -// will get them. -func encodeKeys(rawKeys [][]byte) []string { - keys := make([]string, len(rawKeys)) - for i := range rawKeys { - keys[i] = base64.StdEncoding.EncodeToString(rawKeys[i]) - } - return keys + return client, unsealKeys, func() { defer cluster.Cleanup() } } // testVaultServerUninit creates an uninitialized server. diff --git a/command/commands.go b/command/commands.go index 68e2542b06db2..82b4919e05b95 100644 --- a/command/commands.go +++ b/command/commands.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -12,7 +9,7 @@ import ( "github.com/hashicorp/vault/builtin/plugin" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/version" + "github.com/hashicorp/vault/sdk/version" "github.com/mitchellh/cli" /* @@ -85,17 +82,10 @@ const ( EnvVaultLicensePath = "VAULT_LICENSE_PATH" // EnvVaultDetailed is to output detailed information (e.g., ListResponseWithInfo). EnvVaultDetailed = `VAULT_DETAILED` - // EnvVaultLogFormat is used to specify the log format. 
Supported values are "standard" and "json" - EnvVaultLogFormat = "VAULT_LOG_FORMAT" - // EnvVaultLogLevel is used to specify the log level applied to logging - // Supported log levels: Trace, Debug, Error, Warn, Info - EnvVaultLogLevel = "VAULT_LOG_LEVEL" - // EnvVaultExperiments defines the experiments to enable for a server as a - // comma separated list. See experiments.ValidExperiments() for the list of - // valid experiments. Not mutable or persisted in storage, only read and - // logged at startup _per node_. This was initially introduced for the events - // system being developed over multiple release cycles. - EnvVaultExperiments = "VAULT_EXPERIMENTS" + + // DisableSSCTokens is an env var used to disable index bearing + // token functionality + DisableSSCTokens = "VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS" // flagNameAddress is the flag used in the base command to read in the // address of the Vault server. @@ -136,31 +126,8 @@ const ( flagNameAllowedManagedKeys = "allowed-managed-keys" // flagNamePluginVersion selects what version of a plugin should be used. 
flagNamePluginVersion = "plugin-version" - // flagNameUserLockoutThreshold is the flag name used for tuning the auth mount lockout threshold parameter - flagNameUserLockoutThreshold = "user-lockout-threshold" - // flagNameUserLockoutDuration is the flag name used for tuning the auth mount lockout duration parameter - flagNameUserLockoutDuration = "user-lockout-duration" - // flagNameUserLockoutCounterResetDuration is the flag name used for tuning the auth mount lockout counter reset parameter - flagNameUserLockoutCounterResetDuration = "user-lockout-counter-reset-duration" - // flagNameUserLockoutDisable is the flag name used for tuning the auth mount disable lockout parameter - flagNameUserLockoutDisable = "user-lockout-disable" // flagNameDisableRedirects is used to prevent the client from honoring a single redirect as a response to a request flagNameDisableRedirects = "disable-redirects" - // flagNameCombineLogs is used to specify whether log output should be combined and sent to stdout - flagNameCombineLogs = "combine-logs" - // flagNameLogFile is used to specify the path to the log file that Vault should use for logging - flagNameLogFile = "log-file" - // flagNameLogRotateBytes is the flag used to specify the number of bytes a log file should be before it is rotated. - flagNameLogRotateBytes = "log-rotate-bytes" - // flagNameLogRotateDuration is the flag used to specify the duration after which a log file should be rotated. - flagNameLogRotateDuration = "log-rotate-duration" - // flagNameLogRotateMaxFiles is the flag used to specify the maximum number of older/archived log files to keep. - flagNameLogRotateMaxFiles = "log-rotate-max-files" - // flagNameLogFormat is the flag used to specify the log format. 
Supported values are "standard" and "json" - flagNameLogFormat = "log-format" - // flagNameLogLevel is used to specify the log level applied to logging - // Supported log levels: Trace, Debug, Error, Warn, Info - flagNameLogLevel = "log-level" ) var ( @@ -218,10 +185,13 @@ var ( "kubernetes": ksr.NewServiceRegistration, } - initCommandsEnt = func(ui, serverCmdUi cli.Ui, runOpts *RunOptions, commands map[string]cli.CommandFactory) {} + initCommandsEnt = func(ui, serverCmdUi cli.Ui, runOpts *RunOptions) {} ) -func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.CommandFactory { +// Commands is the mapping of all the available commands. +var Commands map[string]cli.CommandFactory + +func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { loginHandlers := map[string]LoginHandler{ "alicloud": &credAliCloud.CLIHandler{}, "aws": &credAws.CLIHandler{}, @@ -254,19 +224,13 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co } } - commands := map[string]cli.CommandFactory{ + Commands = map[string]cli.CommandFactory{ "agent": func() (cli.Command, error) { return &AgentCommand{ BaseCommand: &BaseCommand{ UI: serverCmdUi, }, ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), - }, nil - }, - "agent generate-config": func() (cli.Command, error) { - return &AgentGenerateConfigCommand{ - BaseCommand: getBaseCommand(), }, nil }, "audit": func() (cli.Command, error) { @@ -336,11 +300,6 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, - "events subscribe": func() (cli.Command, error) { - return &EventsSubscribeCommands{ - BaseCommand: getBaseCommand(), - }, nil - }, "lease": func() (cli.Command, error) { return &LeaseCommand{ BaseCommand: getBaseCommand(), @@ -529,46 +488,11 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, - "patch": func() (cli.Command, error) { - 
return &PatchCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, "path-help": func() (cli.Command, error) { return &PathHelpCommand{ BaseCommand: getBaseCommand(), }, nil }, - "pki": func() (cli.Command, error) { - return &PKICommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki health-check": func() (cli.Command, error) { - return &PKIHealthCheckCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki issue": func() (cli.Command, error) { - return &PKIIssueCACommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki list-intermediates": func() (cli.Command, error) { - return &PKIListIntermediateCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki reissue": func() (cli.Command, error) { - return &PKIReIssueCACommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki verify-sign": func() (cli.Command, error) { - return &PKIVerifySignCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, "plugin": func() (cli.Command, error) { return &PluginCommand{ BaseCommand: getBaseCommand(), @@ -604,15 +528,6 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, - "proxy": func() (cli.Command, error) { - return &ProxyCommand{ - BaseCommand: &BaseCommand{ - UI: serverCmdUi, - }, - ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), - }, nil - }, "policy": func() (cli.Command, error) { return &PolicyCommand{ BaseCommand: getBaseCommand(), @@ -717,36 +632,6 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, - "transform": func() (cli.Command, error) { - return &TransformCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transform import": func() (cli.Command, error) { - return &TransformImportCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transform import-version": func() (cli.Command, error) { - return &TransformImportVersionCommand{ - BaseCommand: getBaseCommand(), - }, nil 
- }, - "transit": func() (cli.Command, error) { - return &TransitCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transit import": func() (cli.Command, error) { - return &TransitImportCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transit import-version": func() (cli.Command, error) { - return &TransitImportVersionCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, "token": func() (cli.Command, error) { return &TokenCommand{ BaseCommand: getBaseCommand(), @@ -881,8 +766,16 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co }, } - initCommandsEnt(ui, serverCmdUi, runOpts, commands) - return commands + // Disabled by default until functional + if os.Getenv(OperatorDiagnoseEnableEnv) != "" { + Commands["operator diagnose"] = func() (cli.Command, error) { + return &OperatorDiagnoseCommand{ + BaseCommand: getBaseCommand(), + }, nil + } + } + + initCommandsEnt(ui, serverCmdUi, runOpts) } // MakeShutdownCh returns a channel that can be used for shutdown diff --git a/command/commands_nonwindows.go b/command/commands_nonwindows.go index f8d128c3fd425..c94c485f49075 100644 --- a/command/commands_nonwindows.go +++ b/command/commands_nonwindows.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !windows package command diff --git a/command/commands_windows.go b/command/commands_windows.go index 541a6e4aea8ec..ed06a07406f63 100644 --- a/command/commands_windows.go +++ b/command/commands_windows.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build windows package command diff --git a/command/config.go b/command/config.go index 3fbc53a966707..b46581fc80b70 100644 --- a/command/config.go +++ b/command/config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/config/config.go b/command/config/config.go index 71f9127887d7a..ef0c4adf6dcd8 100644 --- a/command/config/config.go +++ b/command/config/config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package config import ( diff --git a/command/config/config_test.go b/command/config/config_test.go index fef151622259a..b5d41361e9b71 100644 --- a/command/config/config_test.go +++ b/command/config/config_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package config import ( diff --git a/command/config/util.go b/command/config/util.go index f295f462ae503..1ac47df7e0533 100644 --- a/command/config/util.go +++ b/command/config/util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package config import ( diff --git a/command/config/validate_listener.go b/command/config/validate_listener.go index e2d27166a04db..7a56ec6998508 100644 --- a/command/config/validate_listener.go +++ b/command/config/validate_listener.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !fips_140_3 package config diff --git a/command/config_test.go b/command/config_test.go index 787c6795765a0..0ed34992f3a4c 100644 --- a/command/config_test.go +++ b/command/config_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/debug.go b/command/debug.go index e5440b3b88881..71e1a97c8da7e 100644 --- a/command/debug.go +++ b/command/debug.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -24,7 +21,7 @@ import ( "github.com/hashicorp/vault/helper/osutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/version" + "github.com/hashicorp/vault/sdk/version" "github.com/mholt/archiver/v3" "github.com/mitchellh/cli" "github.com/oklog/run" diff --git a/command/debug_test.go b/command/debug_test.go index e63e1ebed0585..de51c770f5acb 100644 --- a/command/debug_test.go +++ b/command/debug_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/delete.go b/command/delete.go index 7da6dd2d98479..78d786f11756c 100644 --- a/command/delete.go +++ b/command/delete.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/delete_test.go b/command/delete_test.go index 629be7abb42c2..e26d393b16fea 100644 --- a/command/delete_test.go +++ b/command/delete_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/events.go b/command/events.go deleted file mode 100644 index 353c97947d96d..0000000000000 --- a/command/events.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/posener/complete" - "nhooyr.io/websocket" -) - -var ( - _ cli.Command = (*EventsSubscribeCommands)(nil) - _ cli.CommandAutocomplete = (*EventsSubscribeCommands)(nil) -) - -type EventsSubscribeCommands struct { - *BaseCommand -} - -func (c *EventsSubscribeCommands) Synopsis() string { - return "Subscribe to events" -} - -func (c *EventsSubscribeCommands) Help() string { - helpText := ` -Usage: vault events subscribe [-format=json] [-timeout=XYZs] eventType - - Subscribe to events of the given event type (topic). The events will be - output to standard out. - - The output will be a JSON object serialized using the default protobuf - JSON serialization format, with one line per event received. -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *EventsSubscribeCommands) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) - - return set -} - -func (c *EventsSubscribeCommands) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *EventsSubscribeCommands) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *EventsSubscribeCommands) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - err = c.subscribeRequest(client, "sys/events/subscribe/"+args[0]) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - return 0 -} - -func (c *EventsSubscribeCommands) subscribeRequest(client 
*api.Client, path string) error { - r := client.NewRequest("GET", "/v1/"+path) - u := r.URL - if u.Scheme == "http" { - u.Scheme = "ws" - } else { - u.Scheme = "wss" - } - q := u.Query() - q.Set("json", "true") - u.RawQuery = q.Encode() - client.AddHeader("X-Vault-Token", client.Token()) - client.AddHeader("X-Vault-Namesapce", client.Namespace()) - ctx := context.Background() - conn, resp, err := websocket.Dial(ctx, u.String(), &websocket.DialOptions{ - HTTPClient: client.CloneConfig().HttpClient, - HTTPHeader: client.Headers(), - }) - if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("events endpoint not found; check `vault read sys/experiments` to see if an events experiment is available but disabled") - } - return err - } - defer conn.Close(websocket.StatusNormalClosure, "") - - for { - _, message, err := conn.Read(ctx) - if err != nil { - return err - } - _, err = os.Stdout.Write(message) - if err != nil { - return err - } - } -} diff --git a/command/events_test.go b/command/events_test.go deleted file mode 100644 index bb2aef0b37baa..0000000000000 --- a/command/events_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "strings" - "testing" - - "github.com/mitchellh/cli" -) - -func testEventsSubscribeCommand(tb testing.TB) (*cli.MockUi, *EventsSubscribeCommands) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &EventsSubscribeCommands{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } -} - -// TestEventsSubscribeCommand_Run tests that the command argument parsing is working as expected. 
-func TestEventsSubscribeCommand_Run(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - args []string - out string - code int - }{ - { - "not_enough_args", - []string{}, - "Not enough arguments", - 1, - }, - { - "too_many_args", - []string{"foo", "bar"}, - "Too many arguments", - 1, - }, - } - - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testEventsSubscribeCommand(t) - cmd.client = client - - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } - - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) - } -} diff --git a/command/format.go b/command/format.go index 5e42d31d1f9ab..4dffed6463592 100644 --- a/command/format.go +++ b/command/format.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -73,7 +70,6 @@ var Formatters = map[string]Formatter{ "yaml": YamlFormatter{}, "yml": YamlFormatter{}, "pretty": PrettyFormatter{}, - "raw": RawFormatter{}, } func Format(ui cli.Ui) string { @@ -129,27 +125,6 @@ func (j JsonFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) e return nil } -// An output formatter for raw output of the original request object -type RawFormatter struct{} - -func (r RawFormatter) Format(data interface{}) ([]byte, error) { - byte_data, ok := data.([]byte) - if !ok { - return nil, fmt.Errorf("This command does not support the -format=raw option; only `vault read` does.") - } - - return byte_data, nil -} - -func (r RawFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { - b, err := r.Format(data) - if err != nil { - return err - } - ui.Output(string(b)) - return nil -} - // An output formatter for yaml output format of an object type YamlFormatter struct{} @@ -405,9 +380,6 @@ func (t TableFormatter) OutputSealStatusStruct(ui cli.Ui, secret *api.Secret, da if status.LastWAL != 0 { out = append(out, fmt.Sprintf("Last WAL | %d", status.LastWAL)) } - if len(status.Warnings) > 0 { - out = append(out, fmt.Sprintf("Warnings | %v", status.Warnings)) - } ui.Output(tableOutput(out, &columnize.Config{ Delim: "|", @@ -560,9 +532,6 @@ func (t TableFormatter) OutputSecret(ui cli.Ui, secret *api.Secret) error { for _, constraint := range constraintSet.Any { out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_id %s %s", k, constraint.Type, hopeDelim, constraint.ID)) out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_uses_passcode %s %t", k, constraint.Type, hopeDelim, constraint.UsesPasscode)) - if constraint.Name != "" { - out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_name %s %s", k, constraint.Type, hopeDelim, constraint.Name)) - } } } } else { // Token information only makes sense if no further MFA requirement (i.e. 
if we actually have a token) diff --git a/command/format_test.go b/command/format_test.go index 2bdc45ebe57ae..3d1a3333ca594 100644 --- a/command/format_test.go +++ b/command/format_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -16,10 +13,11 @@ import ( "github.com/hashicorp/vault/sdk/helper/jsonutil" ) +var output string + type mockUi struct { t *testing.T SampleData string - outputData *string } func (m mockUi) Ask(_ string) (string, error) { @@ -31,15 +29,14 @@ func (m mockUi) AskSecret(_ string) (string, error) { m.t.FailNow() return "", nil } -func (m mockUi) Output(s string) { *m.outputData = s } +func (m mockUi) Output(s string) { output = s } func (m mockUi) Info(s string) { m.t.Log(s) } func (m mockUi) Error(s string) { m.t.Log(s) } func (m mockUi) Warn(s string) { m.t.Log(s) } func TestJsonFormatter(t *testing.T) { os.Setenv(EnvVaultFormat, "json") - var output string - ui := mockUi{t: t, SampleData: "something", outputData: &output} + ui := mockUi{t: t, SampleData: "something"} if err := outputWithFormat(ui, nil, ui); err != 0 { t.Fatal(err) } @@ -56,8 +53,7 @@ func TestJsonFormatter(t *testing.T) { func TestYamlFormatter(t *testing.T) { os.Setenv(EnvVaultFormat, "yaml") - var output string - ui := mockUi{t: t, SampleData: "something", outputData: &output} + ui := mockUi{t: t, SampleData: "something"} if err := outputWithFormat(ui, nil, ui); err != 0 { t.Fatal(err) } @@ -75,8 +71,7 @@ func TestYamlFormatter(t *testing.T) { func TestTableFormatter(t *testing.T) { os.Setenv(EnvVaultFormat, "table") - var output string - ui := mockUi{t: t, outputData: &output} + ui := mockUi{t: t} // Testing secret formatting s := api.Secret{Data: map[string]interface{}{"k": "something"}} @@ -93,8 +88,7 @@ func TestTableFormatter(t *testing.T) { // fields in the embedded struct explicitly. It also checks the spacing, // indentation, and delimiters of table formatting explicitly. 
func TestStatusFormat(t *testing.T) { - var output string - ui := mockUi{t: t, outputData: &output} + ui := mockUi{t: t} os.Setenv(EnvVaultFormat, "table") statusHA := getMockStatusData(false) @@ -124,8 +118,7 @@ Cluster ID cluster id HA Enabled true Raft Committed Index 3 Raft Applied Index 4 -Last WAL 2 -Warnings [warning]` +Last WAL 2` if expectedOutputString != output { fmt.Printf("%s\n%+v\n %s\n%+v\n", "output found was: ", output, "versus", expectedOutputString) @@ -181,7 +174,6 @@ func getMockStatusData(emptyFields bool) SealStatusOutput { ClusterID: "cluster id", RecoverySeal: true, StorageType: "storage type", - Warnings: []string{"warning"}, } // must initialize this struct without explicit field names due to embedding diff --git a/command/healthcheck/healthcheck.go b/command/healthcheck/healthcheck.go deleted file mode 100644 index a6fb2040eedbc..0000000000000 --- a/command/healthcheck/healthcheck.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -/* - * The healthcheck package attempts to allow generic checks of arbitrary - * engines, while providing a common framework with some performance - * efficiencies in mind. - * - * The core of this package is the Executor context; a caller would - * provision a set of checks, an API client, and a configuration, - * which the executor would use to decide which checks to execute - * and how. - * - * Checks are based around a series of remote paths that are fetched by - * the client; these are broken into two categories: static paths, which - * can always be fetched; and dynamic paths, which the check fetches based - * on earlier results. - * - * For instance, a basic PKI CA lifetime check will have static fetch against - * the list of CAs, and a dynamic fetch, using that earlier list, to fetch the - * PEMs of all CAs. 
- * - * This allows health checks to share data: many PKI checks will need the - * issuer list and so repeatedly fetching this may result in a performance - * impact. - */ - -package healthcheck - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/logical" -) - -type Executor struct { - Client *api.Client - Mount string - DefaultEnabled bool - - Config map[string]map[string]interface{} - - Resources map[string]map[logical.Operation]*PathFetch - - Checkers []Check -} - -func NewExecutor(client *api.Client, mount string) *Executor { - return &Executor{ - Client: client, - DefaultEnabled: true, - Mount: mount, - Config: make(map[string]map[string]interface{}), - Resources: make(map[string]map[logical.Operation]*PathFetch), - } -} - -func (e *Executor) AddCheck(c Check) { - e.Checkers = append(e.Checkers, c) -} - -func (e *Executor) BuildConfig(external map[string]interface{}) error { - merged := e.Config - - for index, checker := range e.Checkers { - name := checker.Name() - if _, present := merged[name]; name == "" || present { - return fmt.Errorf("bad checker %v: name is empty or already present: %v", index, name) - } - - // Fetch the default configuration; if the check returns enabled - // status, verify it matches our expectations (in the event it should - // be disabled by default), otherwise, add it in. - config := checker.DefaultConfig() - enabled, present := config["enabled"] - if !present { - config["enabled"] = e.DefaultEnabled - } else if enabled.(bool) && !e.DefaultEnabled { - config["enabled"] = e.DefaultEnabled - } - - // Now apply any external config for this check. - if econfig, present := external[name]; present { - for param, evalue := range econfig.(map[string]interface{}) { - if _, ok := config[param]; !ok { - // Assumption: default configs have all possible - // configuration options. This external config has - // an unknown option, so we want to error out. 
- return fmt.Errorf("unknown configuration option for %v: %v", name, param) - } - - config[param] = evalue - } - } - - // Now apply it and save it. - if err := checker.LoadConfig(config); err != nil { - return fmt.Errorf("error saving merged config for %v: %w", name, err) - } - merged[name] = config - } - - return nil -} - -func (e *Executor) Execute() (map[string][]*Result, error) { - ret := make(map[string][]*Result) - for _, checker := range e.Checkers { - if !checker.IsEnabled() { - continue - } - - if err := checker.FetchResources(e); err != nil { - return nil, fmt.Errorf("failed to fetch resources %v: %w", checker.Name(), err) - } - - results, err := checker.Evaluate(e) - if err != nil { - return nil, fmt.Errorf("failed to evaluate %v: %w", checker.Name(), err) - } - - if results == nil { - results = []*Result{} - } - - for _, result := range results { - result.Endpoint = e.templatePath(result.Endpoint) - result.StatusDisplay = ResultStatusNameMap[result.Status] - } - - ret[checker.Name()] = results - } - - return ret, nil -} - -func (e *Executor) templatePath(path string) string { - return strings.ReplaceAll(path, "{{mount}}", e.Mount) -} - -func (e *Executor) FetchIfNotFetched(op logical.Operation, rawPath string) (*PathFetch, error) { - path := e.templatePath(rawPath) - - byOp, present := e.Resources[path] - if present && byOp != nil { - result, present := byOp[op] - if present && result != nil { - return result, result.FetchSurfaceError() - } - } - - // Must not exist in cache; create it. 
- if byOp == nil { - e.Resources[path] = make(map[logical.Operation]*PathFetch) - } - - ret := &PathFetch{ - Operation: op, - Path: path, - ParsedCache: make(map[string]interface{}), - } - - data := map[string][]string{} - if op == logical.ListOperation { - data["list"] = []string{"true"} - } else if op != logical.ReadOperation { - return nil, fmt.Errorf("unknown operation: %v on %v", op, path) - } - - // client.ReadRaw* methods require a manual timeout override - ctx, cancel := context.WithTimeout(context.Background(), e.Client.ClientTimeout()) - defer cancel() - - response, err := e.Client.Logical().ReadRawWithDataWithContext(ctx, path, data) - ret.Response = response - if err != nil { - ret.FetchError = err - } else { - // Not all secrets will parse correctly. Sometimes we really want - // to fetch a raw endpoint, sometimes we're run with a bad mount - // or missing permissions. - secret, secretErr := e.Client.Logical().ParseRawResponseAndCloseBody(response, err) - if secretErr != nil { - ret.SecretParseError = secretErr - } else { - ret.Secret = secret - } - } - - e.Resources[path][op] = ret - return ret, ret.FetchSurfaceError() -} - -type PathFetch struct { - Operation logical.Operation - Path string - Response *api.Response - FetchError error - Secret *api.Secret - SecretParseError error - ParsedCache map[string]interface{} -} - -func (p *PathFetch) IsOK() bool { - return p.FetchError == nil && p.Response != nil -} - -func (p *PathFetch) IsSecretOK() bool { - return p.IsOK() && p.SecretParseError == nil && p.Secret != nil -} - -func (p *PathFetch) FetchSurfaceError() error { - if p.IsOK() || p.IsSecretPermissionsError() || p.IsUnsupportedPathError() || p.IsMissingResource() || p.Is404NotFound() { - return nil - } - - if strings.Contains(p.FetchError.Error(), "route entry not found") { - return fmt.Errorf("Error making API request: was a bad mount given?\n\nOperation: %v\nPath: %v\nOriginal Error:\n%w", p.Operation, p.Path, p.FetchError) - } - - return 
p.FetchError -} - -func (p *PathFetch) IsSecretPermissionsError() bool { - return !p.IsOK() && strings.Contains(p.FetchError.Error(), "permission denied") -} - -func (p *PathFetch) IsUnsupportedPathError() bool { - return !p.IsOK() && strings.Contains(p.FetchError.Error(), "unsupported path") -} - -func (p *PathFetch) IsMissingResource() bool { - return !p.IsOK() && strings.Contains(p.FetchError.Error(), "unable to find") -} - -func (p *PathFetch) Is404NotFound() bool { - return !p.IsOK() && strings.HasSuffix(strings.TrimSpace(p.FetchError.Error()), "Code: 404. Errors:") -} - -type Check interface { - Name() string - IsEnabled() bool - - DefaultConfig() map[string]interface{} - LoadConfig(config map[string]interface{}) error - - FetchResources(e *Executor) error - - Evaluate(e *Executor) ([]*Result, error) -} - -type ResultStatus int - -const ( - ResultNotApplicable ResultStatus = iota - ResultOK - ResultInformational - ResultWarning - ResultCritical - ResultInvalidVersion - ResultInsufficientPermissions -) - -var ResultStatusNameMap = map[ResultStatus]string{ - ResultNotApplicable: "not_applicable", - ResultOK: "ok", - ResultInformational: "informational", - ResultWarning: "warning", - ResultCritical: "critical", - ResultInvalidVersion: "invalid_version", - ResultInsufficientPermissions: "insufficient_permissions", -} - -var NameResultStatusMap = map[string]ResultStatus{ - "not_applicable": ResultNotApplicable, - "ok": ResultOK, - "informational": ResultInformational, - "warning": ResultWarning, - "critical": ResultCritical, - "invalid_version": ResultInvalidVersion, - "insufficient_permissions": ResultInsufficientPermissions, -} - -type Result struct { - Status ResultStatus `json:"status_code"` - StatusDisplay string `json:"status"` - Endpoint string `json:"endpoint,omitempty"` - Message string `json:"message,omitempty"` -} diff --git a/command/healthcheck/pki.go b/command/healthcheck/pki.go deleted file mode 100644 index 42f4fc485865f..0000000000000 --- 
a/command/healthcheck/pki.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - - "github.com/hashicorp/vault/sdk/logical" -) - -func pkiFetchIssuersList(e *Executor, versionError func()) (bool, *PathFetch, []string, error) { - issuersRet, err := e.FetchIfNotFetched(logical.ListOperation, "/{{mount}}/issuers") - if err != nil { - return true, issuersRet, nil, err - } - - if !issuersRet.IsSecretOK() { - if issuersRet.IsUnsupportedPathError() { - versionError() - } - - if issuersRet.Is404NotFound() { - return true, issuersRet, nil, fmt.Errorf("this mount lacks any configured issuers, limiting health check usefulness") - } - - return true, issuersRet, nil, nil - } - - if len(issuersRet.ParsedCache) == 0 { - var issuers []string - for _, rawIssuerId := range issuersRet.Secret.Data["keys"].([]interface{}) { - issuers = append(issuers, rawIssuerId.(string)) - } - issuersRet.ParsedCache["issuers"] = issuers - } - - return false, issuersRet, issuersRet.ParsedCache["issuers"].([]string), nil -} - -func parsePEM(contents string) ([]byte, error) { - // Need to parse out the issuer from its PEM format. 
- pemBlock, _ := pem.Decode([]byte(contents)) - if pemBlock == nil { - return nil, fmt.Errorf("invalid PEM block") - } - - return pemBlock.Bytes, nil -} - -func ParsePEMCert(contents string) (*x509.Certificate, error) { - parsed, err := parsePEM(contents) - if err != nil { - return nil, err - } - - cert, err := x509.ParseCertificate(parsed) - if err != nil { - return nil, fmt.Errorf("invalid certificate: %w", err) - } - - return cert, nil -} - -func parsePEMCRL(contents string) (*x509.RevocationList, error) { - parsed, err := parsePEM(contents) - if err != nil { - return nil, err - } - - crl, err := x509.ParseRevocationList(parsed) - if err != nil { - return nil, fmt.Errorf("invalid CRL: %w", err) - } - - return crl, nil -} - -func pkiFetchIssuer(e *Executor, issuer string, versionError func()) (bool, *PathFetch, *x509.Certificate, error) { - issuerRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/issuer/"+issuer+"/json") - if err != nil { - return true, issuerRet, nil, err - } - - if !issuerRet.IsSecretOK() { - if issuerRet.IsUnsupportedPathError() { - versionError() - } - return true, issuerRet, nil, nil - } - - if len(issuerRet.ParsedCache) == 0 { - cert, err := ParsePEMCert(issuerRet.Secret.Data["certificate"].(string)) - if err != nil { - return true, issuerRet, nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuer, err) - } - - issuerRet.ParsedCache["certificate"] = cert - } - - return false, issuerRet, issuerRet.ParsedCache["certificate"].(*x509.Certificate), nil -} - -func pkiFetchIssuerEntry(e *Executor, issuer string, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { - issuerRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/issuer/"+issuer) - if err != nil { - return true, issuerRet, nil, err - } - - if !issuerRet.IsSecretOK() { - if issuerRet.IsUnsupportedPathError() { - versionError() - } - return true, issuerRet, nil, nil - } - - if len(issuerRet.ParsedCache) == 0 { - cert, err := 
ParsePEMCert(issuerRet.Secret.Data["certificate"].(string)) - if err != nil { - return true, issuerRet, nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuer, err) - } - - issuerRet.ParsedCache["certificate"] = cert - } - - var data map[string]interface{} = nil - if issuerRet.Secret != nil && len(issuerRet.Secret.Data) > 0 { - data = issuerRet.Secret.Data - } - - return false, issuerRet, data, nil -} - -func pkiFetchIssuerCRL(e *Executor, issuer string, delta bool, versionError func()) (bool, *PathFetch, *x509.RevocationList, error) { - path := "/{{mount}}/issuer/" + issuer + "/crl" - name := "CRL" - if delta { - path += "/delta" - name = "Delta CRL" - } - - crlRet, err := e.FetchIfNotFetched(logical.ReadOperation, path) - if err != nil { - return true, crlRet, nil, err - } - - if !crlRet.IsSecretOK() { - if crlRet.IsUnsupportedPathError() { - versionError() - } - return true, crlRet, nil, nil - } - - if len(crlRet.ParsedCache) == 0 { - crl, err := parsePEMCRL(crlRet.Secret.Data["crl"].(string)) - if err != nil { - return true, crlRet, nil, fmt.Errorf("unable to parse issuer %v's %v: %w", issuer, name, err) - } - crlRet.ParsedCache["crl"] = crl - } - - return false, crlRet, crlRet.ParsedCache["crl"].(*x509.RevocationList), nil -} - -func pkiFetchKeyEntry(e *Executor, key string, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { - keyRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/key/"+key) - if err != nil { - return true, keyRet, nil, err - } - - if !keyRet.IsSecretOK() { - if keyRet.IsUnsupportedPathError() { - versionError() - } - return true, keyRet, nil, nil - } - - var data map[string]interface{} = nil - if keyRet.Secret != nil && len(keyRet.Secret.Data) > 0 { - data = keyRet.Secret.Data - } - - return false, keyRet, data, nil -} - -func pkiFetchLeavesList(e *Executor, versionError func()) (bool, *PathFetch, []string, error) { - leavesRet, err := e.FetchIfNotFetched(logical.ListOperation, 
"/{{mount}}/certs") - if err != nil { - return true, leavesRet, nil, err - } - - if !leavesRet.IsSecretOK() { - if leavesRet.IsUnsupportedPathError() { - versionError() - } - - return true, leavesRet, nil, nil - } - - if len(leavesRet.ParsedCache) == 0 { - var leaves []string - for _, rawSerial := range leavesRet.Secret.Data["keys"].([]interface{}) { - leaves = append(leaves, rawSerial.(string)) - } - leavesRet.ParsedCache["leaves"] = leaves - leavesRet.ParsedCache["count"] = len(leaves) - } - - return false, leavesRet, leavesRet.ParsedCache["leaves"].([]string), nil -} - -func pkiFetchLeaf(e *Executor, serial string, versionError func()) (bool, *PathFetch, *x509.Certificate, error) { - leafRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/cert/"+serial) - if err != nil { - return true, leafRet, nil, err - } - - if !leafRet.IsSecretOK() { - if leafRet.IsUnsupportedPathError() { - versionError() - } - return true, leafRet, nil, nil - } - - if len(leafRet.ParsedCache) == 0 { - cert, err := ParsePEMCert(leafRet.Secret.Data["certificate"].(string)) - if err != nil { - return true, leafRet, nil, fmt.Errorf("unable to parse leaf %v's certificate: %w", serial, err) - } - - leafRet.ParsedCache["certificate"] = cert - } - - return false, leafRet, leafRet.ParsedCache["certificate"].(*x509.Certificate), nil -} - -func pkiFetchRolesList(e *Executor, versionError func()) (bool, *PathFetch, []string, error) { - rolesRet, err := e.FetchIfNotFetched(logical.ListOperation, "/{{mount}}/roles") - if err != nil { - return true, rolesRet, nil, err - } - - if !rolesRet.IsSecretOK() { - if rolesRet.IsUnsupportedPathError() { - versionError() - } - - return true, rolesRet, nil, nil - } - - if len(rolesRet.ParsedCache) == 0 { - var roles []string - for _, roleName := range rolesRet.Secret.Data["keys"].([]interface{}) { - roles = append(roles, roleName.(string)) - } - rolesRet.ParsedCache["roles"] = roles - } - - return false, rolesRet, 
rolesRet.ParsedCache["roles"].([]string), nil -} - -func pkiFetchRole(e *Executor, name string, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { - roleRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/roles/"+name) - if err != nil { - return true, roleRet, nil, err - } - - if !roleRet.IsSecretOK() { - if roleRet.IsUnsupportedPathError() { - versionError() - } - return true, roleRet, nil, nil - } - - var data map[string]interface{} = nil - if roleRet.Secret != nil && len(roleRet.Secret.Data) > 0 { - data = roleRet.Secret.Data - } - - return false, roleRet, data, nil -} diff --git a/command/healthcheck/pki_allow_acme_headers.go b/command/healthcheck/pki_allow_acme_headers.go deleted file mode 100644 index 2015ac276c02d..0000000000000 --- a/command/healthcheck/pki_allow_acme_headers.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/logical" -) - -type AllowAcmeHeaders struct { - Enabled bool - UnsupportedVersion bool - - TuneFetcher *PathFetch - TuneData map[string]interface{} - - AcmeConfigFetcher *PathFetch -} - -func NewAllowAcmeHeaders() Check { - return &AllowAcmeHeaders{} -} - -func (h *AllowAcmeHeaders) Name() string { - return "allow_acme_headers" -} - -func (h *AllowAcmeHeaders) IsEnabled() bool { - return h.Enabled -} - -func (h *AllowAcmeHeaders) DefaultConfig() map[string]interface{} { - return map[string]interface{}{} -} - -func (h *AllowAcmeHeaders) LoadConfig(config map[string]interface{}) error { - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *AllowAcmeHeaders) FetchResources(e *Executor) error { - var err error - 
h.AcmeConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/acme") - if err != nil { - return err - } - - if h.AcmeConfigFetcher.IsUnsupportedPathError() { - h.UnsupportedVersion = true - } - - _, h.TuneFetcher, h.TuneData, err = fetchMountTune(e, func() { - h.UnsupportedVersion = true - }) - if err != nil { - return err - } - - return nil -} - -func (h *AllowAcmeHeaders) Evaluate(e *Executor) ([]*Result, error) { - if h.UnsupportedVersion { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: h.AcmeConfigFetcher.Path, - Message: "This health check requires Vault 1.14+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - if h.AcmeConfigFetcher.IsSecretPermissionsError() { - msg := "Without read access to ACME configuration, this health check is unable to function." - return craftInsufficientPermissionResult(e, h.AcmeConfigFetcher.Path, msg), nil - } - - acmeEnabled, err := isAcmeEnabled(h.AcmeConfigFetcher) - if err != nil { - return nil, err - } - - if !acmeEnabled { - ret := Result{ - Status: ResultNotApplicable, - Endpoint: h.AcmeConfigFetcher.Path, - Message: "ACME is not enabled, no additional response headers required.", - } - return []*Result{&ret}, nil - } - - if h.TuneFetcher.IsSecretPermissionsError() { - msg := "Without access to mount tune information, this health check is unable to function." 
- return craftInsufficientPermissionResult(e, h.TuneFetcher.Path, msg), nil - } - - resp, err := StringList(h.TuneData["allowed_response_headers"]) - if err != nil { - return nil, fmt.Errorf("unable to parse value from server for allowed_response_headers: %w", err) - } - - requiredResponseHeaders := []string{"Replay-Nonce", "Link", "Location"} - foundResponseHeaders := []string{} - for _, param := range resp { - for _, reqHeader := range requiredResponseHeaders { - if strings.EqualFold(param, reqHeader) { - foundResponseHeaders = append(foundResponseHeaders, reqHeader) - break - } - } - } - - foundAllHeaders := strutil.EquivalentSlices(requiredResponseHeaders, foundResponseHeaders) - - if !foundAllHeaders { - ret := Result{ - Status: ResultWarning, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "Mount hasn't enabled 'Replay-Nonce', 'Link', 'Location' response headers, these are required for ACME to function.", - } - return []*Result{&ret}, nil - } - - ret := Result{ - Status: ResultOK, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "Mount has enabled 'Replay-Nonce', 'Link', 'Location' response headers.", - } - return []*Result{&ret}, nil -} - -func craftInsufficientPermissionResult(e *Executor, path, errorMsg string) []*Result { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: path, - Message: errorMsg, - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable read the tune endpoint for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission to read the tune endpoint for this mount. " + ret.Message - } - - return []*Result{&ret} -} diff --git a/command/healthcheck/pki_allow_if_modified_since.go b/command/healthcheck/pki_allow_if_modified_since.go deleted file mode 100644 index bb5306e054188..0000000000000 --- a/command/healthcheck/pki_allow_if_modified_since.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type AllowIfModifiedSince struct { - Enabled bool - UnsupportedVersion bool - - TuneData map[string]interface{} - Fetcher *PathFetch -} - -func NewAllowIfModifiedSinceCheck() Check { - return &AllowIfModifiedSince{} -} - -func (h *AllowIfModifiedSince) Name() string { - return "allow_if_modified_since" -} - -func (h *AllowIfModifiedSince) IsEnabled() bool { - return h.Enabled -} - -func (h *AllowIfModifiedSince) DefaultConfig() map[string]interface{} { - return map[string]interface{}{} -} - -func (h *AllowIfModifiedSince) LoadConfig(config map[string]interface{}) error { - var err error - - h.Enabled, err = parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - - return nil -} - -func (h *AllowIfModifiedSince) FetchResources(e *Executor) error { - var exit bool - var err error - - exit, h.Fetcher, h.TuneData, err = fetchMountTune(e, func() { - h.UnsupportedVersion = true - }) - - if exit || err != nil { - return err - } - return nil -} - -func (h *AllowIfModifiedSince) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "This health check requires Vault 1.12+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - if h.Fetcher.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "Without this information, this health check is unable to function.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable read the tune endpoint for this mount. 
" + ret.Message - } else { - ret.Message = "This token lacks permission to read the tune endpoint for this mount. " + ret.Message - } - - results = append(results, &ret) - return - } - - req, err := StringList(h.TuneData["passthrough_request_headers"]) - if err != nil { - return nil, fmt.Errorf("unable to parse value from server for passthrough_request_headers: %w", err) - } - - resp, err := StringList(h.TuneData["allowed_response_headers"]) - if err != nil { - return nil, fmt.Errorf("unable to parse value from server for allowed_response_headers: %w", err) - } - - foundIMS := false - for _, param := range req { - if strings.EqualFold(param, "If-Modified-Since") { - foundIMS = true - break - } - } - - foundLM := false - for _, param := range resp { - if strings.EqualFold(param, "Last-Modified") { - foundLM = true - break - } - } - - if !foundIMS || !foundLM { - ret := Result{ - Status: ResultInformational, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "Mount hasn't enabled If-Modified-Since Request or Last-Modified Response headers; consider enabling these headers to allow clients to fetch CAs and CRLs only when they've changed, reducing total bandwidth.", - } - results = append(results, &ret) - } else { - ret := Result{ - Status: ResultOK, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "Mount allows the If-Modified-Since request header and Last-Modified response header.", - } - results = append(results, &ret) - } - - return -} diff --git a/command/healthcheck/pki_audit_visibility.go b/command/healthcheck/pki_audit_visibility.go deleted file mode 100644 index 6b3834fb0c9e7..0000000000000 --- a/command/healthcheck/pki_audit_visibility.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -var VisibleReqParams = []string{ - "csr", - "certificate", - "issuer_ref", - "common_name", - "alt_names", - "other_sans", - "ip_sans", - "uri_sans", - "ttl", - "not_after", - "serial_number", - "key_type", - "private_key_format", - "managed_key_name", - "managed_key_id", - "ou", - "organization", - "country", - "locality", - "province", - "street_address", - "postal_code", - "permitted_dns_domains", - "policy_identifiers", - "ext_key_usage_oids", -} - -var VisibleRespParams = []string{ - "certificate", - "issuing_ca", - "serial_number", - "error", - "ca_chain", -} - -var HiddenReqParams = []string{ - "private_key", - "pem_bundle", -} - -var HiddenRespParams = []string{ - "private_key", - "pem_bundle", -} - -type AuditVisibility struct { - Enabled bool - UnsupportedVersion bool - - IgnoredParameters map[string]bool - TuneData map[string]interface{} - Fetcher *PathFetch -} - -func NewAuditVisibilityCheck() Check { - return &AuditVisibility{ - IgnoredParameters: make(map[string]bool), - } -} - -func (h *AuditVisibility) Name() string { - return "audit_visibility" -} - -func (h *AuditVisibility) IsEnabled() bool { - return h.Enabled -} - -func (h *AuditVisibility) DefaultConfig() map[string]interface{} { - return map[string]interface{}{ - "ignored_parameters": []string{}, - } -} - -func (h *AuditVisibility) LoadConfig(config map[string]interface{}) error { - var err error - - coerced, err := StringList(config["ignored_parameters"]) - if err != nil { - return fmt.Errorf("error parsing %v.ignored_parameters: %v", h.Name(), err) - } - for _, ignored := range coerced { - h.IgnoredParameters[ignored] = true - } - - h.Enabled, err = parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - - return nil -} - -func (h *AuditVisibility) FetchResources(e *Executor) 
error { - var exit bool - var err error - - exit, h.Fetcher, h.TuneData, err = fetchMountTune(e, func() { - h.UnsupportedVersion = true - }) - - if exit || err != nil { - return err - } - return nil -} - -func (h *AuditVisibility) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "This health check requires Vault 1.9+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - if h.Fetcher.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "Without this information, this health check is unable to function.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable read the tune endpoint for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission to read the tune endpoint for this mount. 
" + ret.Message - } - - results = append(results, &ret) - return - } - - sourceMap := map[string][]string{ - "audit_non_hmac_request_keys": VisibleReqParams, - "audit_non_hmac_response_keys": VisibleRespParams, - } - for source, visibleList := range sourceMap { - actual, err := StringList(h.TuneData[source]) - if err != nil { - return nil, fmt.Errorf("error parsing %v from server: %v", source, err) - } - - for _, param := range visibleList { - found := false - for _, tuned := range actual { - if param == tuned { - found = true - break - } - } - - if !found { - ret := Result{ - Status: ResultInformational, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: fmt.Sprintf("Mount currently HMACs %v because it is not in %v; as this is not a sensitive security parameter, it is encouraged to disable HMACing to allow better auditing of the PKI engine.", param, source), - } - results = append(results, &ret) - } - } - } - - sourceMap = map[string][]string{ - "audit_non_hmac_request_keys": HiddenReqParams, - "audit_non_hmac_response_keys": HiddenRespParams, - } - for source, hiddenList := range sourceMap { - actual, err := StringList(h.TuneData[source]) - if err != nil { - return nil, fmt.Errorf("error parsing %v from server: %v", source, err) - } - for _, param := range hiddenList { - found := false - for _, tuned := range actual { - if param == tuned { - found = true - break - } - } - - if found { - ret := Result{ - Status: ResultWarning, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: fmt.Sprintf("Mount currently doesn't HMAC %v because it is in %v; as this is a sensitive security parameter it is encouraged to HMAC it in the Audit logs.", param, source), - } - results = append(results, &ret) - } - } - } - - if len(results) == 0 { - ret := Result{ - Status: ResultOK, - Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "Mount audit information is configured appropriately.", - } - results = append(results, &ret) - } - - return -} diff --git 
a/command/healthcheck/pki_ca_validity_period.go b/command/healthcheck/pki_ca_validity_period.go deleted file mode 100644 index 511de757061d8..0000000000000 --- a/command/healthcheck/pki_ca_validity_period.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "bytes" - "crypto/x509" - "fmt" - "strings" - "time" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type CAValidityPeriod struct { - Enabled bool - - RootExpiries map[ResultStatus]time.Duration - IntermediateExpieries map[ResultStatus]time.Duration - - UnsupportedVersion bool - - Issuers map[string]*x509.Certificate -} - -func NewCAValidityPeriodCheck() Check { - return &CAValidityPeriod{ - RootExpiries: make(map[ResultStatus]time.Duration, 3), - IntermediateExpieries: make(map[ResultStatus]time.Duration, 3), - Issuers: make(map[string]*x509.Certificate), - } -} - -func (h *CAValidityPeriod) Name() string { - return "ca_validity_period" -} - -func (h *CAValidityPeriod) IsEnabled() bool { - return h.Enabled -} - -func (h *CAValidityPeriod) DefaultConfig() map[string]interface{} { - return map[string]interface{}{ - "root_expiry_critical": "180d", - "intermediate_expiry_critical": "30d", - "root_expiry_warning": "365d", - "intermediate_expiry_warning": "60d", - "root_expiry_informational": "730d", - "intermediate_expiry_informational": "180d", - } -} - -func (h *CAValidityPeriod) LoadConfig(config map[string]interface{}) error { - parameters := []string{ - "root_expiry_critical", - "intermediate_expiry_critical", - "root_expiry_warning", - "intermediate_expiry_warning", - "root_expiry_informational", - "intermediate_expiry_informational", - } - for _, parameter := range parameters { - name_split := strings.Split(parameter, "_") - if len(name_split) != 3 || name_split[1] != "expiry" { - return fmt.Errorf("bad parameter: %v / %v / %v", parameter, len(name_split), name_split[1]) - } - - status, present := 
NameResultStatusMap[name_split[2]] - if !present { - return fmt.Errorf("bad parameter: %v's type %v isn't in name map", parameter, name_split[2]) - } - - value_raw, present := config[parameter] - if !present { - return fmt.Errorf("parameter not present in config; Executor should've handled this for us: %v", parameter) - } - - value, err := parseutil.ParseDurationSecond(value_raw) - if err != nil { - return fmt.Errorf("failed to parse parameter (%v=%v): %w", parameter, value_raw, err) - } - - if name_split[0] == "root" { - h.RootExpiries[status] = value - } else if name_split[0] == "intermediate" { - h.IntermediateExpieries[status] = value - } else { - return fmt.Errorf("bad parameter: %v's CA type isn't root/intermediate: %v", parameters, name_split[0]) - } - } - - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *CAValidityPeriod) FetchResources(e *Executor) error { - exit, _, issuers, err := pkiFetchIssuersList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - return err - } - - for _, issuer := range issuers { - skip, _, cert, err := pkiFetchIssuer(e, issuer, func() { - h.UnsupportedVersion = true - }) - if skip || err != nil { - if err != nil { - return err - } - continue - } - - h.Issuers[issuer] = cert - } - - return nil -} - -func (h *CAValidityPeriod) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/issuers", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - now := time.Now() - - for name, cert := range h.Issuers { - var ret Result - ret.Status = ResultOK - ret.Endpoint = "/{{mount}}/issuer/" + name - ret.Message = fmt.Sprintf("Issuer's validity 
(%v) is OK", cert.NotAfter.Format("2006-01-02")) - - hasSelfReference := bytes.Equal(cert.RawSubject, cert.RawIssuer) - isSelfSigned := cert.CheckSignatureFrom(cert) == nil - isRoot := hasSelfReference && isSelfSigned - - for _, criticality := range []ResultStatus{ResultCritical, ResultWarning, ResultInformational} { - var d time.Duration - if isRoot { - d = h.RootExpiries[criticality] - } else { - d = h.IntermediateExpieries[criticality] - } - - windowExpiry := now.Add(d) - if cert.NotAfter.Before(windowExpiry) { - ret.Status = criticality - ret.Message = fmt.Sprintf("Issuer's validity is outside of the suggested rotation window: issuer is valid until %v but expires within %v (ending on %v). It is suggested to start rotating this issuer to new key material to avoid future downtime caused by this current issuer expiring.", cert.NotAfter.Format("2006-01-02"), FormatDuration(d), windowExpiry.Format("2006-01-02")) - break - } - } - - results = append(results, &ret) - } - - return -} diff --git a/command/healthcheck/pki_crl_validity_period.go b/command/healthcheck/pki_crl_validity_period.go deleted file mode 100644 index 8450a058f994d..0000000000000 --- a/command/healthcheck/pki_crl_validity_period.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "crypto/x509" - "fmt" - "time" - - "github.com/hashicorp/vault/sdk/logical" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type CRLValidityPeriod struct { - Enabled bool - - CRLExpiryPercentage int - DeltaCRLExpiryPercentage int - - UnsupportedVersion bool - NoDeltas bool - - CRLs map[string]*x509.RevocationList - DeltaCRLs map[string]*x509.RevocationList - - CRLConfig *PathFetch -} - -func NewCRLValidityPeriodCheck() Check { - return &CRLValidityPeriod{ - CRLs: make(map[string]*x509.RevocationList), - DeltaCRLs: make(map[string]*x509.RevocationList), - } -} - -func (h *CRLValidityPeriod) Name() string { - return "crl_validity_period" -} - -func (h *CRLValidityPeriod) IsEnabled() bool { - return h.Enabled -} - -func (h *CRLValidityPeriod) DefaultConfig() map[string]interface{} { - return map[string]interface{}{ - "crl_expiry_pct_critical": "95", - "delta_crl_expiry_pct_critical": "95", - } -} - -func (h *CRLValidityPeriod) LoadConfig(config map[string]interface{}) error { - value, err := parseutil.SafeParseIntRange(config["crl_expiry_pct_critical"], 1, 99) - if err != nil { - return fmt.Errorf("error parsing %v.crl_expiry_pct_critical=%v: %w", h.Name(), config["crl_expiry_pct_critical"], err) - } - h.CRLExpiryPercentage = int(value) - - value, err = parseutil.SafeParseIntRange(config["delta_crl_expiry_pct_critical"], 1, 99) - if err != nil { - return fmt.Errorf("error parsing %v.delta_crl_expiry_pct_critical=%v: %w", h.Name(), config["delta_crl_expiry_pct_critical"], err) - } - h.DeltaCRLExpiryPercentage = int(value) - - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *CRLValidityPeriod) FetchResources(e *Executor) error { - exit, _, issuers, err := pkiFetchIssuersList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != 
nil { - return err - } - - for _, issuer := range issuers { - exit, _, crl, err := pkiFetchIssuerCRL(e, issuer, false, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - if err != nil { - return err - } - continue - } - - h.CRLs[issuer] = crl - - exit, _, delta, err := pkiFetchIssuerCRL(e, issuer, true, func() { - h.NoDeltas = true - }) - if exit || err != nil { - if err != nil { - return err - } - continue - } - - h.DeltaCRLs[issuer] = delta - } - - // Check if the issuer is fetched yet. - configRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/crl") - if err != nil { - return err - } - - h.CRLConfig = configRet - - return nil -} - -func (h *CRLValidityPeriod) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/issuers", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - now := time.Now() - crlDisabled := false - if h.CRLConfig != nil { - if h.CRLConfig.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/config/crl", - Message: "This prevents the health check from seeing if the CRL is disabled and dropping the severity of this check accordingly.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable read authenticated CRL configuration for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission to read the CRL configuration for this mount. 
" + ret.Message - } - - results = append(results, &ret) - } else if h.CRLConfig.Secret != nil && h.CRLConfig.Secret.Data["disabled"] != nil { - crlDisabled = h.CRLConfig.Secret.Data["disabled"].(bool) - } - } - - if h.NoDeltas && len(h.DeltaCRLs) == 0 { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/issuer/*/crl/delta", - Message: "This health check validates Delta CRLs on Vault 1.12+, but an earlier version of Vault was used. No results about delta CRL validity will be returned.", - } - results = append(results, &ret) - } - - for name, crl := range h.CRLs { - var ret Result - ret.Status = ResultOK - ret.Endpoint = "/{{mount}}/issuer/" + name + "/crl" - ret.Message = fmt.Sprintf("CRL's validity (%v to %v) is OK.", crl.ThisUpdate.Format("2006-01-02"), crl.NextUpdate.Format("2006-01-02")) - - used := now.Sub(crl.ThisUpdate) - total := crl.NextUpdate.Sub(crl.ThisUpdate) - ratio := time.Duration((int64(total) * int64(h.CRLExpiryPercentage)) / int64(100)) - if used >= ratio { - expWhen := crl.ThisUpdate.Add(ratio) - ret.Status = ResultCritical - ret.Message = fmt.Sprintf("CRL's validity is outside of suggested rotation window: CRL's next update is expected at %v, but expires within %v%% of validity window (starting on %v and ending on %v). It is suggested to rotate this CRL and start propagating it to hosts to avoid any issues caused by stale CRLs.", crl.NextUpdate.Format("2006-01-02"), h.CRLExpiryPercentage, crl.ThisUpdate.Format("2006-01-02"), expWhen.Format("2006-01-02")) - - if crlDisabled { - ret.Status = ResultInformational - ret.Message += " Because the CRL is disabled, this is less of a concern." 
- } - } - - results = append(results, &ret) - } - - for name, crl := range h.DeltaCRLs { - var ret Result - ret.Status = ResultOK - ret.Endpoint = "/{{mount}}/issuer/" + name + "/crl/delta" - ret.Message = fmt.Sprintf("Delta CRL's validity (%v to %v) is OK.", crl.ThisUpdate.Format("2006-01-02"), crl.NextUpdate.Format("2006-01-02")) - - used := now.Sub(crl.ThisUpdate) - total := crl.NextUpdate.Sub(crl.ThisUpdate) - ratio := time.Duration((int64(total) * int64(h.DeltaCRLExpiryPercentage)) / int64(100)) - if used >= ratio { - expWhen := crl.ThisUpdate.Add(ratio) - ret.Status = ResultCritical - ret.Message = fmt.Sprintf("Delta CRL's validity is outside of suggested rotation window: Delta CRL's next update is expected at %v, but expires within %v%% of validity window (starting on %v and ending on %v). It is suggested to rotate this Delta CRL and start propagating it to hosts to avoid any issues caused by stale CRLs.", crl.NextUpdate.Format("2006-01-02"), h.CRLExpiryPercentage, crl.ThisUpdate.Format("2006-01-02"), expWhen.Format("2006-01-02")) - - if crlDisabled { - ret.Status = ResultInformational - ret.Message += " Because the CRL is disabled, this is less of a concern." - } - } - - results = append(results, &ret) - } - - return -} diff --git a/command/healthcheck/pki_enable_acme_issuance.go b/command/healthcheck/pki_enable_acme_issuance.go deleted file mode 100644 index 986165d0e253e..0000000000000 --- a/command/healthcheck/pki_enable_acme_issuance.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "bytes" - "context" - "crypto/tls" - "fmt" - "net/http" - "net/url" - - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/sdk/logical" - - "golang.org/x/crypto/acme" -) - -type EnableAcmeIssuance struct { - Enabled bool - UnsupportedVersion bool - - AcmeConfigFetcher *PathFetch - ClusterConfigFetcher *PathFetch - TotalIssuers int - RootIssuers int -} - -func NewEnableAcmeIssuance() Check { - return &EnableAcmeIssuance{} -} - -func (h *EnableAcmeIssuance) Name() string { - return "enable_acme_issuance" -} - -func (h *EnableAcmeIssuance) IsEnabled() bool { - return h.Enabled -} - -func (h *EnableAcmeIssuance) DefaultConfig() map[string]interface{} { - return map[string]interface{}{} -} - -func (h *EnableAcmeIssuance) LoadConfig(config map[string]interface{}) error { - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *EnableAcmeIssuance) FetchResources(e *Executor) error { - var err error - h.AcmeConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/acme") - if err != nil { - return err - } - - if h.AcmeConfigFetcher.IsUnsupportedPathError() { - h.UnsupportedVersion = true - } - - h.ClusterConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/cluster") - if err != nil { - return err - } - - if h.ClusterConfigFetcher.IsUnsupportedPathError() { - h.UnsupportedVersion = true - } - - h.TotalIssuers, h.RootIssuers, err = doesMountContainOnlyRootIssuers(e) - - return nil -} - -func doesMountContainOnlyRootIssuers(e *Executor) (int, int, error) { - exit, _, issuers, err := pkiFetchIssuersList(e, func() {}) - if exit || err != nil { - return 0, 0, err - } - - totalIssuers := 0 - rootIssuers := 0 - - for _, issuer := range issuers { - skip, _, cert, err := pkiFetchIssuer(e, 
issuer, func() {}) - - if skip || err != nil { - if err != nil { - return 0, 0, err - } - continue - } - totalIssuers++ - - if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { - continue - } - if err := cert.CheckSignatureFrom(cert); err != nil { - continue - } - rootIssuers++ - } - - return totalIssuers, rootIssuers, nil -} - -func isAcmeEnabled(fetcher *PathFetch) (bool, error) { - isEnabledRaw, ok := fetcher.Secret.Data["enabled"] - if !ok { - return false, fmt.Errorf("enabled configuration field missing from acme config") - } - - parseBool, err := parseutil.ParseBool(isEnabledRaw) - if err != nil { - return false, fmt.Errorf("failed parsing 'enabled' field from ACME config: %w", err) - } - - return parseBool, nil -} - -func verifyLocalPathUrl(h *EnableAcmeIssuance) error { - localPathRaw, ok := h.ClusterConfigFetcher.Secret.Data["path"] - if !ok { - return fmt.Errorf("'path' field missing from config") - } - - localPath, err := parseutil.ParseString(localPathRaw) - if err != nil { - return fmt.Errorf("failed converting 'path' field from local config: %w", err) - } - - if localPath == "" { - return fmt.Errorf("'path' field not configured within /{{mount}}/config/cluster") - } - - parsedUrl, err := url.Parse(localPath) - if err != nil { - return fmt.Errorf("failed to parse URL from path config: %v: %w", localPathRaw, err) - } - - if parsedUrl.Scheme != "https" { - return fmt.Errorf("the configured 'path' field in /{{mount}}/config/cluster was not using an https scheme") - } - - // Avoid issues with SSL certificates for this check, we just want to validate that we would - // hit an ACME server with the path they specified in configuration - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - client := &http.Client{Transport: tr} - acmeDirectoryUrl := parsedUrl.JoinPath("/acme/", "directory") - acmeClient := acme.Client{HTTPClient: client, DirectoryURL: acmeDirectoryUrl.String()} - _, err = acmeClient.Discover(context.Background()) 
- if err != nil { - return fmt.Errorf("using configured 'path' field ('%s') in /{{mount}}/config/cluster failed to reach the ACME"+ - " directory: %s: %w", parsedUrl.String(), acmeDirectoryUrl.String(), err) - } - - return nil -} - -func (h *EnableAcmeIssuance) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: h.AcmeConfigFetcher.Path, - Message: "This health check requires Vault 1.14+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - if h.AcmeConfigFetcher.IsSecretPermissionsError() { - msg := "Without this information, this health check is unable to function." - return craftInsufficientPermissionResult(e, h.AcmeConfigFetcher.Path, msg), nil - } - - acmeEnabled, err := isAcmeEnabled(h.AcmeConfigFetcher) - if err != nil { - return nil, err - } - - if !acmeEnabled { - if h.TotalIssuers == 0 { - ret := Result{ - Status: ResultNotApplicable, - Endpoint: h.AcmeConfigFetcher.Path, - Message: "No issuers in mount, ACME is not required.", - } - return []*Result{&ret}, nil - } - - if h.TotalIssuers == h.RootIssuers { - ret := Result{ - Status: ResultNotApplicable, - Endpoint: h.AcmeConfigFetcher.Path, - Message: "Mount contains only root issuers, ACME is not required.", - } - return []*Result{&ret}, nil - } - - ret := Result{ - Status: ResultInformational, - Endpoint: h.AcmeConfigFetcher.Path, - Message: "Consider enabling ACME support to support a self-rotating PKI infrastructure.", - } - return []*Result{&ret}, nil - } - - if h.ClusterConfigFetcher.IsSecretPermissionsError() { - msg := "Without this information, this health check is unable to function." 
- return craftInsufficientPermissionResult(e, h.ClusterConfigFetcher.Path, msg), nil - } - - localPathIssue := verifyLocalPathUrl(h) - - if localPathIssue != nil { - ret := Result{ - Status: ResultWarning, - Endpoint: h.ClusterConfigFetcher.Path, - Message: "ACME enabled in config but not functional: " + localPathIssue.Error(), - } - return []*Result{&ret}, nil - } - - ret := Result{ - Status: ResultOK, - Endpoint: h.ClusterConfigFetcher.Path, - Message: "ACME enabled and successfully connected to the ACME directory.", - } - return []*Result{&ret}, nil -} diff --git a/command/healthcheck/pki_enable_auto_tidy.go b/command/healthcheck/pki_enable_auto_tidy.go deleted file mode 100644 index 1734d1adcbf15..0000000000000 --- a/command/healthcheck/pki_enable_auto_tidy.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - "time" - - "github.com/hashicorp/vault/sdk/logical" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type EnableAutoTidy struct { - Enabled bool - UnsupportedVersion bool - - IntervalDurationCritical time.Duration - IntervalDurationWarning time.Duration - PauseDurationCritical time.Duration - PauseDurationWarning time.Duration - - TidyConfig *PathFetch -} - -func NewEnableAutoTidyCheck() Check { - return &EnableAutoTidy{} -} - -func (h *EnableAutoTidy) Name() string { - return "enable_auto_tidy" -} - -func (h *EnableAutoTidy) IsEnabled() bool { - return h.Enabled -} - -func (h *EnableAutoTidy) DefaultConfig() map[string]interface{} { - return map[string]interface{}{ - "interval_duration_critical": "7d", - "interval_duration_warning": "2d", - "pause_duration_critical": "1s", - "pause_duration_warning": "200ms", - } -} - -func (h *EnableAutoTidy) fromConfig(config map[string]interface{}, param string) (time.Duration, error) { - value, err := parseutil.ParseDurationSecond(config[param]) - if err != nil { - return time.Duration(0), fmt.Errorf("failed to 
parse parameter %v.%v=%v: %w", h.Name(), param, config[param], err) - } - - return value, nil -} - -func (h *EnableAutoTidy) LoadConfig(config map[string]interface{}) error { - var err error - - h.IntervalDurationCritical, err = h.fromConfig(config, "interval_duration_critical") - if err != nil { - return err - } - - h.IntervalDurationWarning, err = h.fromConfig(config, "interval_duration_warning") - if err != nil { - return err - } - - h.PauseDurationCritical, err = h.fromConfig(config, "pause_duration_critical") - if err != nil { - return err - } - - h.PauseDurationWarning, err = h.fromConfig(config, "pause_duration_warning") - if err != nil { - return err - } - - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *EnableAutoTidy) FetchResources(e *Executor) error { - var err error - h.TidyConfig, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/auto-tidy") - if err != nil { - return err - } - - if h.TidyConfig.IsUnsupportedPathError() { - h.UnsupportedVersion = true - } - - return nil -} - -func (h *EnableAutoTidy) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/config/auto-tidy", - Message: "This health check requires Vault 1.12+, but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - if h.TidyConfig == nil { - return - } - - if h.TidyConfig.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/config/auto-tidy", - Message: "This prevents the health check from functioning at all, as it cannot .", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable read authenticated auto-tidy configuration for this mount. 
" + ret.Message - } else { - ret.Message = "This token lacks permission to read the auto-tidy configuration for this mount. " + ret.Message - } - - return []*Result{&ret}, nil - } - - isEnabled := h.TidyConfig.Secret.Data["enabled"].(bool) - intervalDuration, err := parseutil.ParseDurationSecond(h.TidyConfig.Secret.Data["interval_duration"]) - if err != nil { - return nil, fmt.Errorf("error parsing API response from server for interval_duration: %w", err) - } - - pauseDuration, err := parseutil.ParseDurationSecond(h.TidyConfig.Secret.Data["pause_duration"]) - if err != nil { - return nil, fmt.Errorf("error parsing API response from server for pause_duration: %w", err) - } - - if !isEnabled { - ret := Result{ - Status: ResultInformational, - Endpoint: "/{{mount}}/config/auto-tidy", - Message: "Auto-tidy is currently disabled; consider enabling auto-tidy to execute tidy operations periodically. This helps the health and performance of a mount.", - } - results = append(results, &ret) - } else { - baseMsg := "Auto-tidy is configured with too long of a value for %v (%v); this could impact performance as tidies run too infrequently or take too long to execute." 
- - if intervalDuration >= h.IntervalDurationCritical { - ret := Result{ - Status: ResultCritical, - Endpoint: "/{{mount}}/config/auto-tidy", - Message: fmt.Sprintf(baseMsg, "interval_duration", intervalDuration), - } - results = append(results, &ret) - } else if intervalDuration >= h.IntervalDurationWarning { - ret := Result{ - Status: ResultWarning, - Endpoint: "/{{mount}}/config/auto-tidy", - Message: fmt.Sprintf(baseMsg, "interval_duration", intervalDuration), - } - results = append(results, &ret) - } - - if pauseDuration >= h.PauseDurationCritical { - ret := Result{ - Status: ResultCritical, - Endpoint: "/{{mount}}/config/auto-tidy", - Message: fmt.Sprintf(baseMsg, "pause_duration", pauseDuration), - } - results = append(results, &ret) - } else if pauseDuration >= h.PauseDurationWarning { - ret := Result{ - Status: ResultWarning, - Endpoint: "/{{mount}}/config/auto-tidy", - Message: fmt.Sprintf(baseMsg, "pause_duration", pauseDuration), - } - results = append(results, &ret) - } - - if len(results) == 0 { - ret := Result{ - Status: ResultOK, - Endpoint: "/{{mount}}/config/auto-tidy", - Message: "Auto-tidy is enabled and configured appropriately.", - } - results = append(results, &ret) - } - } - - return -} diff --git a/command/healthcheck/pki_hardware_backed_root.go b/command/healthcheck/pki_hardware_backed_root.go deleted file mode 100644 index 2fdda6e4a62fb..0000000000000 --- a/command/healthcheck/pki_hardware_backed_root.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "bytes" - "crypto/x509" - "fmt" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type HardwareBackedRoot struct { - Enabled bool - - UnsupportedVersion bool - - FetchIssues map[string]*PathFetch - IssuerKeyMap map[string]string - KeyIsManaged map[string]string -} - -func NewHardwareBackedRootCheck() Check { - return &HardwareBackedRoot{ - FetchIssues: make(map[string]*PathFetch), - IssuerKeyMap: make(map[string]string), - KeyIsManaged: make(map[string]string), - } -} - -func (h *HardwareBackedRoot) Name() string { - return "hardware_backed_root" -} - -func (h *HardwareBackedRoot) IsEnabled() bool { - return h.Enabled -} - -func (h *HardwareBackedRoot) DefaultConfig() map[string]interface{} { - return map[string]interface{}{ - "enabled": false, - } -} - -func (h *HardwareBackedRoot) LoadConfig(config map[string]interface{}) error { - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *HardwareBackedRoot) FetchResources(e *Executor) error { - exit, _, issuers, err := pkiFetchIssuersList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - return err - } - - for _, issuer := range issuers { - skip, ret, entry, err := pkiFetchIssuerEntry(e, issuer, func() { - h.UnsupportedVersion = true - }) - if skip || err != nil || entry == nil { - if err != nil { - return err - } - h.FetchIssues[issuer] = ret - continue - } - - // Ensure we only check Root CAs. - cert := ret.ParsedCache["certificate"].(*x509.Certificate) - if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { - continue - } - if err := cert.CheckSignatureFrom(cert); err != nil { - continue - } - - // Ensure we only check issuers with keys. 
- keyId, present := entry["key_id"].(string) - if !present || len(keyId) == 0 { - continue - } - - h.IssuerKeyMap[issuer] = keyId - skip, ret, keyEntry, err := pkiFetchKeyEntry(e, keyId, func() { - h.UnsupportedVersion = true - }) - if skip || err != nil || keyEntry == nil { - if err != nil { - return err - } - - h.FetchIssues[issuer] = ret - continue - } - - uuid, present := keyEntry["managed_key_id"].(string) - if present { - h.KeyIsManaged[keyId] = uuid - } - } - - return nil -} - -func (h *HardwareBackedRoot) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/issuers", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - for issuer, fetchPath := range h.FetchIssues { - if fetchPath != nil && fetchPath.IsSecretPermissionsError() { - delete(h.IssuerKeyMap, issuer) - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: fetchPath.Path, - Message: "Without this information, this health check is unable to function.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable for the endpoint for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission for the endpoint for this mount. " + ret.Message - } - - results = append(results, &ret) - } - } - - for name, keyId := range h.IssuerKeyMap { - var ret Result - ret.Status = ResultInformational - ret.Endpoint = "/{{mount}}/issuer/" + name - ret.Message = "Root issuer was created using Vault-backed software keys; for added safety of long-lived, important root CAs, you may wish to consider using a HSM or KSM Managed Key to store key material for this issuer." 
- - uuid, present := h.KeyIsManaged[keyId] - if present { - ret.Status = ResultOK - ret.Message = fmt.Sprintf("Root issuer was backed by a HSM or KMS Managed Key: %v.", uuid) - } - - results = append(results, &ret) - } - - return -} diff --git a/command/healthcheck/pki_role_allows_glob_wildcards.go b/command/healthcheck/pki_role_allows_glob_wildcards.go deleted file mode 100644 index 83c55c23856d5..0000000000000 --- a/command/healthcheck/pki_role_allows_glob_wildcards.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type RoleAllowsGlobWildcards struct { - Enabled bool - UnsupportedVersion bool - - RoleListFetchIssue *PathFetch - RoleFetchIssues map[string]*PathFetch - RoleEntryMap map[string]map[string]interface{} -} - -func NewRoleAllowsGlobWildcardsCheck() Check { - return &RoleAllowsGlobWildcards{ - RoleFetchIssues: make(map[string]*PathFetch), - RoleEntryMap: make(map[string]map[string]interface{}), - } -} - -func (h *RoleAllowsGlobWildcards) Name() string { - return "role_allows_glob_wildcards" -} - -func (h *RoleAllowsGlobWildcards) IsEnabled() bool { - return h.Enabled -} - -func (h *RoleAllowsGlobWildcards) DefaultConfig() map[string]interface{} { - return map[string]interface{}{} -} - -func (h *RoleAllowsGlobWildcards) LoadConfig(config map[string]interface{}) error { - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *RoleAllowsGlobWildcards) FetchResources(e *Executor) error { - exit, f, roles, err := pkiFetchRolesList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - if f != nil && f.IsSecretPermissionsError() { - h.RoleListFetchIssue = f - } - return err - } - - for _, role := range roles { - skip, f, entry, err := 
pkiFetchRole(e, role, func() { - h.UnsupportedVersion = true - }) - if skip || err != nil || entry == nil { - if f != nil && f.IsSecretPermissionsError() { - h.RoleFetchIssues[role] = f - } - if err != nil { - return err - } - continue - } - - h.RoleEntryMap[role] = entry - } - - return nil -} - -func (h *RoleAllowsGlobWildcards) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - // Shouldn't happen; roles have been around forever. - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/roles", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: h.RoleListFetchIssue.Path, - Message: "lacks permission either to list the roles. This restricts the ability to fully execute this health check.", - } - if e.Client.Token() == "" { - ret.Message = "No token available and so this health check " + ret.Message - } else { - ret.Message = "This token " + ret.Message - } - return []*Result{&ret}, nil - } - - for role, fetchPath := range h.RoleFetchIssues { - if fetchPath != nil && fetchPath.IsSecretPermissionsError() { - delete(h.RoleEntryMap, role) - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: fetchPath.Path, - Message: "Without this information, this health check is unable to function.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable for the endpoint for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission the endpoint for this mount. 
" + ret.Message - } - - results = append(results, &ret) - } - } - - for role, entry := range h.RoleEntryMap { - allowsWildcards, present := entry["allow_wildcard_certificates"] - if !present { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/roles", - Message: "This health check requires a version of Vault with allow_wildcard_certificates (Vault 1.8.9+, 1.9.4+, or 1.10.0+), but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - if !allowsWildcards.(bool) { - continue - } - - allowsGlobs := entry["allow_glob_domains"].(bool) - if !allowsGlobs { - continue - } - - rawAllowedDomains := entry["allowed_domains"].([]interface{}) - var allowedDomains []string - for _, rawDomain := range rawAllowedDomains { - allowedDomains = append(allowedDomains, rawDomain.(string)) - } - - if len(allowedDomains) == 0 { - continue - } - - hasGlobbedDomain := false - for _, domain := range allowedDomains { - if strings.Contains(domain, "*") { - hasGlobbedDomain = true - break - } - } - - if !hasGlobbedDomain { - continue - } - - ret := Result{ - Status: ResultWarning, - Endpoint: "/{{mount}}/roles/" + role, - Message: fmt.Sprintf("Role currently allows wildcard issuance while allowing globs in allowed_domains (%v). Because globs can expand to one or more wildcard character, including wildcards under additional subdomains, these options are dangerous to enable together. 
If glob domains are required to be enabled, it is suggested to either disable wildcard issuance if not desired, or create two separate roles -- one with wildcard issuance for specified domains and one with glob matching enabled for concrete domain identifiers.", allowedDomains), - } - - results = append(results, &ret) - } - - if len(results) == 0 && len(h.RoleEntryMap) > 0 { - ret := Result{ - Status: ResultOK, - Endpoint: "/{{mount}}/roles", - Message: "Roles follow best practices regarding restricting wildcard certificate issuance in roles.", - } - - results = append(results, &ret) - } - - return -} diff --git a/command/healthcheck/pki_role_allows_localhost.go b/command/healthcheck/pki_role_allows_localhost.go deleted file mode 100644 index 0c9b780abef78..0000000000000 --- a/command/healthcheck/pki_role_allows_localhost.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type RoleAllowsLocalhost struct { - Enabled bool - UnsupportedVersion bool - - RoleListFetchIssue *PathFetch - RoleFetchIssues map[string]*PathFetch - RoleEntryMap map[string]map[string]interface{} -} - -func NewRoleAllowsLocalhostCheck() Check { - return &RoleAllowsLocalhost{ - RoleFetchIssues: make(map[string]*PathFetch), - RoleEntryMap: make(map[string]map[string]interface{}), - } -} - -func (h *RoleAllowsLocalhost) Name() string { - return "role_allows_localhost" -} - -func (h *RoleAllowsLocalhost) IsEnabled() bool { - return h.Enabled -} - -func (h *RoleAllowsLocalhost) DefaultConfig() map[string]interface{} { - return map[string]interface{}{} -} - -func (h *RoleAllowsLocalhost) LoadConfig(config map[string]interface{}) error { - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h 
*RoleAllowsLocalhost) FetchResources(e *Executor) error { - exit, f, roles, err := pkiFetchRolesList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - if f != nil && f.IsSecretPermissionsError() { - h.RoleListFetchIssue = f - } - return err - } - - for _, role := range roles { - skip, f, entry, err := pkiFetchRole(e, role, func() { - h.UnsupportedVersion = true - }) - if skip || err != nil || entry == nil { - if f != nil && f.IsSecretPermissionsError() { - h.RoleFetchIssues[role] = f - } - if err != nil { - return err - } - continue - } - - h.RoleEntryMap[role] = entry - } - - return nil -} - -func (h *RoleAllowsLocalhost) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - // Shouldn't happen; roles have been around forever. - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/roles", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: h.RoleListFetchIssue.Path, - Message: "lacks permission either to list the roles. 
This restricts the ability to fully execute this health check.", - } - if e.Client.Token() == "" { - ret.Message = "No token available and so this health check " + ret.Message - } else { - ret.Message = "This token " + ret.Message - } - return []*Result{&ret}, nil - } - - for role, fetchPath := range h.RoleFetchIssues { - if fetchPath != nil && fetchPath.IsSecretPermissionsError() { - delete(h.RoleEntryMap, role) - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: fetchPath.Path, - Message: "Without this information, this health check is unable to function.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable for the endpoint for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission the endpoint for this mount. " + ret.Message - } - - results = append(results, &ret) - } - } - - for role, entry := range h.RoleEntryMap { - allowsLocalhost := entry["allow_localhost"].(bool) - if !allowsLocalhost { - continue - } - - rawAllowedDomains := entry["allowed_domains"].([]interface{}) - var allowedDomains []string - for _, rawDomain := range rawAllowedDomains { - allowedDomains = append(allowedDomains, rawDomain.(string)) - } - - if len(allowedDomains) == 0 { - continue - } - - ret := Result{ - Status: ResultWarning, - Endpoint: "/{{mount}}/roles/" + role, - Message: fmt.Sprintf("Role currently allows localhost issuance with a non-empty allowed_domains (%v): this role is intended for issuing other hostnames and the allow_localhost=true option may be overlooked by operators. 
If this role is intended to issue certificates valid for localhost, consider setting allow_localhost=false and explicitly adding localhost to the list of allowed domains.", allowedDomains), - } - - results = append(results, &ret) - } - - if len(results) == 0 && len(h.RoleEntryMap) > 0 { - ret := Result{ - Status: ResultOK, - Endpoint: "/{{mount}}/roles", - Message: "Roles follow best practices regarding allowing issuance for localhost domains.", - } - - results = append(results, &ret) - } - - return -} diff --git a/command/healthcheck/pki_role_no_store_false.go b/command/healthcheck/pki_role_no_store_false.go deleted file mode 100644 index 882955e2bd571..0000000000000 --- a/command/healthcheck/pki_role_no_store_false.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - - "github.com/hashicorp/vault/sdk/logical" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type RoleNoStoreFalse struct { - Enabled bool - UnsupportedVersion bool - - AllowedRoles map[string]bool - - RoleListFetchIssue *PathFetch - RoleFetchIssues map[string]*PathFetch - RoleEntryMap map[string]map[string]interface{} - CRLConfig *PathFetch -} - -func NewRoleNoStoreFalseCheck() Check { - return &RoleNoStoreFalse{ - RoleFetchIssues: make(map[string]*PathFetch), - AllowedRoles: make(map[string]bool), - RoleEntryMap: make(map[string]map[string]interface{}), - } -} - -func (h *RoleNoStoreFalse) Name() string { - return "role_no_store_false" -} - -func (h *RoleNoStoreFalse) IsEnabled() bool { - return h.Enabled -} - -func (h *RoleNoStoreFalse) DefaultConfig() map[string]interface{} { - return map[string]interface{}{ - "allowed_roles": []string{}, - } -} - -func (h *RoleNoStoreFalse) LoadConfig(config map[string]interface{}) error { - value, present := config["allowed_roles"].([]interface{}) - if present { - for _, rawValue := range value { - h.AllowedRoles[rawValue.(string)] = true - } - } - - 
enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *RoleNoStoreFalse) FetchResources(e *Executor) error { - exit, f, roles, err := pkiFetchRolesList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - if f != nil && f.IsSecretPermissionsError() { - h.RoleListFetchIssue = f - } - return err - } - - for _, role := range roles { - skip, f, entry, err := pkiFetchRole(e, role, func() { - h.UnsupportedVersion = true - }) - if skip || err != nil || entry == nil { - if f != nil && f.IsSecretPermissionsError() { - h.RoleFetchIssues[role] = f - } - if err != nil { - return err - } - continue - } - - h.RoleEntryMap[role] = entry - } - - // Check if the issuer is fetched yet. - configRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/crl") - if err != nil { - return err - } - - h.CRLConfig = configRet - - return nil -} - -func (h *RoleNoStoreFalse) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - // Shouldn't happen; roles have been around forever. - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/roles", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: h.RoleListFetchIssue.Path, - Message: "lacks permission either to list the roles. 
This restricts the ability to fully execute this health check.", - } - if e.Client.Token() == "" { - ret.Message = "No token available and so this health check " + ret.Message - } else { - ret.Message = "This token " + ret.Message - } - return []*Result{&ret}, nil - } - - for role, fetchPath := range h.RoleFetchIssues { - if fetchPath != nil && fetchPath.IsSecretPermissionsError() { - delete(h.RoleEntryMap, role) - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: fetchPath.Path, - Message: "Without this information, this health check is unable to function.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable for the endpoint for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission the endpoint for this mount. " + ret.Message - } - - results = append(results, &ret) - } - } - - crlAutoRebuild := false - if h.CRLConfig != nil { - if h.CRLConfig.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/config/crl", - Message: "This prevents the health check from seeing if the CRL is set to auto_rebuild=true and lowering the severity of check results appropriately.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable read authenticated CRL configuration for this mount. " + ret.Message - } else { - ret.Message = "This token lacks so permission to read the CRL configuration for this mount. " + ret.Message - } - - results = append(results, &ret) - } else if h.CRLConfig.Secret != nil && h.CRLConfig.Secret.Data["auto_rebuild"] != nil { - crlAutoRebuild = h.CRLConfig.Secret.Data["auto_rebuild"].(bool) - } - } - - for role, entry := range h.RoleEntryMap { - noStore := entry["no_store"].(bool) - if noStore { - continue - } - - ret := Result{ - Status: ResultWarning, - Endpoint: "/{{mount}}/roles/" + role, - Message: "Role currently stores every issued certificate (no_store=false). 
Too many issued and/or revoked certificates can exceed Vault's storage limits and make operations slow. It is encouraged to enable auto-rebuild of CRLs to prevent every revocation from creating a new CRL, and to limit the number of certificates issued under roles with no_store=false: use shorter lifetimes and/or BYOC revocation instead.", - } - - if crlAutoRebuild { - ret.Status = ResultInformational - ret.Message = "Role currently stores every issued certificate (no_store=false). With auto-rebuild CRL enabled, less performance impact occur on CRL rebuilding, but note that too many issued and/or revoked certificates can exceed Vault's storage limits and make operations slow. It is suggested to limit the number of certificates issued under roles with no_store=false: use shorter lifetimes to avoid revocation and/or BYOC revocation instead." - } - - results = append(results, &ret) - } - - if len(results) == 0 && len(h.RoleEntryMap) > 0 { - ret := Result{ - Status: ResultOK, - Endpoint: "/{{mount}}/roles", - Message: "Roles follow best practices regarding certificate storage.", - } - - results = append(results, &ret) - } - - return -} diff --git a/command/healthcheck/pki_root_issued_leaves.go b/command/healthcheck/pki_root_issued_leaves.go deleted file mode 100644 index 615684b0df494..0000000000000 --- a/command/healthcheck/pki_root_issued_leaves.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "bytes" - "crypto/x509" - "fmt" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type RootIssuedLeaves struct { - Enabled bool - UnsupportedVersion bool - - CertsToFetch int - - FetchIssues map[string]*PathFetch - RootCertMap map[string]*x509.Certificate - LeafCertMap map[string]*x509.Certificate -} - -func NewRootIssuedLeavesCheck() Check { - return &RootIssuedLeaves{ - FetchIssues: make(map[string]*PathFetch), - RootCertMap: make(map[string]*x509.Certificate), - LeafCertMap: make(map[string]*x509.Certificate), - } -} - -func (h *RootIssuedLeaves) Name() string { - return "root_issued_leaves" -} - -func (h *RootIssuedLeaves) IsEnabled() bool { - return h.Enabled -} - -func (h *RootIssuedLeaves) DefaultConfig() map[string]interface{} { - return map[string]interface{}{ - "certs_to_fetch": 100, - } -} - -func (h *RootIssuedLeaves) LoadConfig(config map[string]interface{}) error { - count, err := parseutil.SafeParseIntRange(config["certs_to_fetch"], 1, 100000) - if err != nil { - return fmt.Errorf("error parsing %v.certs_to_fetch: %w", h.Name(), err) - } - h.CertsToFetch = int(count) - - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *RootIssuedLeaves) FetchResources(e *Executor) error { - exit, _, issuers, err := pkiFetchIssuersList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - return err - } - - for _, issuer := range issuers { - skip, pathFetch, cert, err := pkiFetchIssuer(e, issuer, func() { - h.UnsupportedVersion = true - }) - h.FetchIssues[issuer] = pathFetch - if skip || err != nil { - if err != nil { - return err - } - continue - } - - // Ensure we only check Root CAs. 
- if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { - continue - } - if err := cert.CheckSignatureFrom(cert); err != nil { - continue - } - - h.RootCertMap[issuer] = cert - } - - exit, f, leaves, err := pkiFetchLeavesList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - if f != nil && f.IsSecretPermissionsError() { - for _, issuer := range issuers { - h.FetchIssues[issuer] = f - } - } - return err - } - - var leafCount int - for _, serial := range leaves { - if leafCount >= h.CertsToFetch { - break - } - - skip, _, cert, err := pkiFetchLeaf(e, serial, func() { - h.UnsupportedVersion = true - }) - if skip || err != nil { - if err != nil { - return err - } - continue - } - - // Ignore other CAs. - if cert.BasicConstraintsValid && cert.IsCA { - continue - } - - leafCount += 1 - h.LeafCertMap[serial] = cert - } - - return nil -} - -func (h *RootIssuedLeaves) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/issuers", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - for issuer, fetchPath := range h.FetchIssues { - if fetchPath != nil && fetchPath.IsSecretPermissionsError() { - delete(h.RootCertMap, issuer) - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: fetchPath.Path, - Message: "Without this information, this health check is unable to function.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable for the endpoint for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission for the endpoint for this mount. 
" + ret.Message - } - - results = append(results, &ret) - } - } - - issuerHasLeaf := make(map[string]bool) - for serial, leaf := range h.LeafCertMap { - if len(issuerHasLeaf) == len(h.RootCertMap) { - break - } - - for issuer, root := range h.RootCertMap { - if issuerHasLeaf[issuer] { - continue - } - - if !bytes.Equal(leaf.RawIssuer, root.RawSubject) { - continue - } - - if err := leaf.CheckSignatureFrom(root); err != nil { - continue - } - - ret := Result{ - Status: ResultWarning, - Endpoint: "/{{mount}}/issuer/" + issuer, - Message: fmt.Sprintf("Root issuer has directly issued non-CA leaf certificates (%v) instead of via an intermediate CA. This can make rotating the root CA harder as direct cross-signing of the roots must be used, rather than cross-signing of the intermediates. It is encouraged to set up and use an intermediate CA and tidy the mount when all directly issued leaves have expired.", serial), - } - - issuerHasLeaf[issuer] = true - - results = append(results, &ret) - } - } - - if len(results) == 0 && len(h.RootCertMap) > 0 { - ret := Result{ - Status: ResultOK, - Endpoint: "/{{mount}}/certs", - Message: "Root certificate(s) in this mount have not directly issued non-CA leaf certificates.", - } - - results = append(results, &ret) - } - - return -} diff --git a/command/healthcheck/pki_tidy_last_run.go b/command/healthcheck/pki_tidy_last_run.go deleted file mode 100644 index 9d07369c877d8..0000000000000 --- a/command/healthcheck/pki_tidy_last_run.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - "time" - - "github.com/hashicorp/vault/sdk/logical" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type TidyLastRun struct { - Enabled bool - UnsupportedVersion bool - - LastRunCritical time.Duration - LastRunWarning time.Duration - - TidyStatus *PathFetch -} - -func NewTidyLastRunCheck() Check { - return &TidyLastRun{} -} - -func (h *TidyLastRun) Name() string { - return "tidy_last_run" -} - -func (h *TidyLastRun) IsEnabled() bool { - return h.Enabled -} - -func (h *TidyLastRun) DefaultConfig() map[string]interface{} { - return map[string]interface{}{ - "last_run_critical": "7d", - "last_run_warning": "2d", - } -} - -func (h *TidyLastRun) LoadConfig(config map[string]interface{}) error { - var err error - h.LastRunCritical, err = parseutil.ParseDurationSecond(config["last_run_critical"]) - if err != nil { - return fmt.Errorf("failed to parse parameter %v.%v=%v: %w", h.Name(), "last_run_critical", config["last_run_critical"], err) - } - - h.LastRunWarning, err = parseutil.ParseDurationSecond(config["last_run_warning"]) - if err != nil { - return fmt.Errorf("failed to parse parameter %v.%v=%v: %w", h.Name(), "last_run_warning", config["last_run_warning"], err) - } - - enabled, err := parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - h.Enabled = enabled - - return nil -} - -func (h *TidyLastRun) FetchResources(e *Executor) error { - var err error - - h.TidyStatus, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/tidy-status") - if err != nil { - return fmt.Errorf("failed to fetch mount's tidy-status value: %v", err) - } - - if h.TidyStatus.IsUnsupportedPathError() { - h.UnsupportedVersion = true - } - - return nil -} - -func (h *TidyLastRun) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - // Shouldn't happen; roles have been around forever. 
- ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/tidy-status", - Message: "This health check requires Vault 1.10+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - if h.TidyStatus == nil { - return nil, nil - } - - if h.TidyStatus.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/tidy-status", - Message: "Without this information, this health check is unable to function.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable read tidy status endpoint for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission to read the tidy status endpoint for this mount. " + ret.Message - } - - results = append(results, &ret) - } - - baseMsg := "Tidy hasn't run in the last %v; this can point to problems with the mount's auto-tidy configuration or an external tidy executor; this can impact PKI's and Vault's performance if not run regularly." - - if h.TidyStatus.Secret != nil && h.TidyStatus.Secret.Data != nil { - ret := Result{ - Status: ResultOK, - Endpoint: "/{{mount}}/tidy-status", - Message: "Tidy has run recently on this mount.", - } - - when := h.TidyStatus.Secret.Data["time_finished"] - if when == nil { - ret.Status = ResultCritical - ret.Message = "Tidy hasn't run since this mount was created; this can point to problems with the mount's auto-tidy configuration or an external tidy executor; this can impact PKI's and Vault's performance if not run regularly. It is suggested to enable auto-tidy on this mount." 
- } else { - now := time.Now() - lastRunCritical := now.Add(-1 * h.LastRunCritical) - lastRunWarning := now.Add(-1 * h.LastRunWarning) - - whenT, err := parseutil.ParseAbsoluteTime(when) - if err != nil { - return nil, fmt.Errorf("error parsing time value (%v): %w", when, err) - } - - if whenT.Before(lastRunCritical) { - ret.Status = ResultCritical - ret.Message = fmt.Sprintf(baseMsg, h.LastRunCritical) - } else if whenT.Before(lastRunWarning) { - ret.Status = ResultWarning - ret.Message = fmt.Sprintf(baseMsg, h.LastRunWarning) - } - } - - results = append(results, &ret) - } - - return -} diff --git a/command/healthcheck/pki_too_many_certs.go b/command/healthcheck/pki_too_many_certs.go deleted file mode 100644 index 59722ab2eff01..0000000000000 --- a/command/healthcheck/pki_too_many_certs.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - - "github.com/hashicorp/go-secure-stdlib/parseutil" -) - -type TooManyCerts struct { - Enabled bool - UnsupportedVersion bool - - CountCritical int - CountWarning int - - CertCounts int - FetchIssue *PathFetch -} - -func NewTooManyCertsCheck() Check { - return &TooManyCerts{} -} - -func (h *TooManyCerts) Name() string { - return "too_many_certs" -} - -func (h *TooManyCerts) IsEnabled() bool { - return h.Enabled -} - -func (h *TooManyCerts) DefaultConfig() map[string]interface{} { - return map[string]interface{}{ - "count_critical": 250000, - "count_warning": 50000, - } -} - -func (h *TooManyCerts) LoadConfig(config map[string]interface{}) error { - value, err := parseutil.SafeParseIntRange(config["count_critical"], 1, 15000000) - if err != nil { - return fmt.Errorf("error parsing %v.count_critical: %w", h.Name(), err) - } - h.CountCritical = int(value) - - value, err = parseutil.SafeParseIntRange(config["count_warning"], 1, 15000000) - if err != nil { - return fmt.Errorf("error parsing %v.count_warning: %w", h.Name(), err) - } - 
h.CountWarning = int(value) - - h.Enabled, err = parseutil.ParseBool(config["enabled"]) - if err != nil { - return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) - } - - return nil -} - -func (h *TooManyCerts) FetchResources(e *Executor) error { - exit, leavesRet, _, err := pkiFetchLeavesList(e, func() { - h.UnsupportedVersion = true - }) - h.FetchIssue = leavesRet - - if exit || err != nil { - return err - } - - h.CertCounts = leavesRet.ParsedCache["count"].(int) - - return nil -} - -func (h *TooManyCerts) Evaluate(e *Executor) (results []*Result, err error) { - if h.UnsupportedVersion { - // Shouldn't happen; /certs has been around forever. - ret := Result{ - Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/certs", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", - } - return []*Result{&ret}, nil - } - - if h.FetchIssue != nil && h.FetchIssue.IsSecretPermissionsError() { - ret := Result{ - Status: ResultInsufficientPermissions, - Endpoint: h.FetchIssue.Path, - Message: "Without this information, this health check is unable to function.", - } - - if e.Client.Token() == "" { - ret.Message = "No token available so unable to list the endpoint for this mount. " + ret.Message - } else { - ret.Message = "This token lacks permission to list the endpoint for this mount. " + ret.Message - } - - results = append(results, &ret) - return - } - - ret := Result{ - Status: ResultOK, - Endpoint: "/{{mount}}/certs", - Message: "This mount has an OK number of stored certificates.", - } - - baseMsg := "This PKI mount has %v outstanding stored certificates; consider using no_store=false on roles, running tidy operations periodically, and using shorter certificate lifetimes to reduce the storage pressure on this mount." 
- if h.CertCounts >= h.CountCritical { - ret.Status = ResultCritical - ret.Message = fmt.Sprintf(baseMsg, h.CertCounts) - } else if h.CertCounts >= h.CountWarning { - ret.Status = ResultWarning - ret.Message = fmt.Sprintf(baseMsg, h.CertCounts) - } - - results = append(results, &ret) - - return -} diff --git a/command/healthcheck/shared.go b/command/healthcheck/shared.go deleted file mode 100644 index 4097704da2c49..0000000000000 --- a/command/healthcheck/shared.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - - "github.com/hashicorp/vault/sdk/logical" -) - -func StringList(source interface{}) ([]string, error) { - if source == nil { - return nil, nil - } - - if value, ok := source.([]string); ok { - return value, nil - } - - if rValues, ok := source.([]interface{}); ok { - var result []string - for index, rValue := range rValues { - value, ok := rValue.(string) - if !ok { - return nil, fmt.Errorf("unknown source type for []string coercion at index %v: %T", index, rValue) - } - - result = append(result, value) - } - - return result, nil - } - - return nil, fmt.Errorf("unknown source type for []string coercion: %T", source) -} - -func fetchMountTune(e *Executor, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { - tuneRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/sys/mounts/{{mount}}/tune") - if err != nil { - return true, nil, nil, fmt.Errorf("failed to fetch mount tune information: %w", err) - } - - if !tuneRet.IsSecretOK() { - if tuneRet.IsUnsupportedPathError() { - versionError() - } - - return true, tuneRet, nil, nil - } - - var data map[string]interface{} = nil - if len(tuneRet.Secret.Data) > 0 { - data = tuneRet.Secret.Data - } - - return false, tuneRet, data, nil -} diff --git a/command/healthcheck/util.go b/command/healthcheck/util.go deleted file mode 100644 index dd5d66e88573d..0000000000000 --- 
a/command/healthcheck/util.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package healthcheck - -import ( - "fmt" - "time" -) - -var ( - oneDay = 24 * time.Hour - oneWeek = 7 * oneDay - oneMonth = 30 * oneDay - oneYear = 365 * oneDay -) - -var suffixDurationMap = map[string]time.Duration{ - "y": oneYear, - "mo": oneMonth, - "w": oneWeek, - "d": oneDay, -} -var orderedSuffixes = []string{"y", "mo", "w", "d"} - -func FormatDuration(d time.Duration) string { - var result string - for _, suffix := range orderedSuffixes { - unit := suffixDurationMap[suffix] - if d > unit { - quantity := int64(d / unit) - result = fmt.Sprintf("%v%v%v", quantity, suffix, result) - d = d - (time.Duration(quantity) * unit) - } - } - - if d > 0 { - result = d.String() + result - } - - return result -} diff --git a/command/kv.go b/command/kv.go index b0834c78c71dc..2172576dbd6fa 100644 --- a/command/kv.go +++ b/command/kv.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/kv_delete.go b/command/kv_delete.go index a365c89163082..5555b9c71a895 100644 --- a/command/kv_delete.go +++ b/command/kv_delete.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -156,7 +153,7 @@ func (c *KVDeleteCommand) Run(args []string) int { var fullPath string if v2 { secret, err = c.deleteV2(partialPath, mountPath, client) - fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) + fullPath = addPrefixToKVPath(partialPath, mountPath, "data") } else { // v1 if mountFlagSyntax { @@ -195,13 +192,13 @@ func (c *KVDeleteCommand) deleteV2(path, mountPath string, client *api.Client) ( var secret *api.Secret switch { case len(c.flagVersions) > 0: - path = addPrefixToKVPath(path, mountPath, "delete", false) + path = addPrefixToKVPath(path, mountPath, "delete") data := map[string]interface{}{ "versions": kvParseVersionsFlags(c.flagVersions), } secret, err = client.Logical().Write(path, data) default: - path = addPrefixToKVPath(path, mountPath, "data", false) + path = addPrefixToKVPath(path, mountPath, "data") secret, err = client.Logical().Delete(path) } diff --git a/command/kv_destroy.go b/command/kv_destroy.go index 1167ec814838e..45cbca02518bc 100644 --- a/command/kv_destroy.go +++ b/command/kv_destroy.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -155,7 +152,7 @@ func (c *KVDestroyCommand) Run(args []string) int { c.UI.Error("Destroy not supported on KV Version 1") return 1 } - destroyPath := addPrefixToKVPath(partialPath, mountPath, "destroy", false) + destroyPath := addPrefixToKVPath(partialPath, mountPath, "destroy") if err != nil { c.UI.Error(err.Error()) return 2 diff --git a/command/kv_enable_versioning.go b/command/kv_enable_versioning.go index 8282dd296ff50..9c2a601432847 100644 --- a/command/kv_enable_versioning.go +++ b/command/kv_enable_versioning.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/kv_get.go b/command/kv_get.go index 1d0e330e71c49..057a787c4e286 100644 --- a/command/kv_get.go +++ b/command/kv_get.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -152,7 +149,7 @@ func (c *KVGetCommand) Run(args []string) int { var fullPath string // Add /data to v2 paths only if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) + fullPath = addPrefixToKVPath(partialPath, mountPath, "data") if c.flagVersion > 0 { versionParam = map[string]string{ diff --git a/command/kv_helpers.go b/command/kv_helpers.go index adf2ec3fdcedc..b362c3bb07136 100644 --- a/command/kv_helpers.go +++ b/command/kv_helpers.go @@ -1,15 +1,10 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( - "context" "errors" "fmt" "io" - paths "path" - "sort" + "path" "strings" "github.com/hashicorp/go-secure-stdlib/strutil" @@ -80,9 +75,7 @@ func kvPreflightVersionRequest(client *api.Client, path string) (string, int, er err = fmt.Errorf( `This output flag requires the success of a preflight request to determine the version of a KV secrets engine. Please -re-run this command with a token with read access to %s. 
-Note that if the path you are trying to reach is a KV v2 path, your token's policy must -allow read access to that path in the format 'mount-path/data/foo', not just 'mount-path/foo'.`, path) +re-run this command with a token with read access to %s`, path) } } @@ -128,15 +121,15 @@ func isKVv2(path string, client *api.Client) (string, bool, error) { return mountPath, version == 2, nil } -func addPrefixToKVPath(path, mountPath, apiPrefix string, skipIfExists bool) string { - if path == mountPath || path == strings.TrimSuffix(mountPath, "/") { - return paths.Join(mountPath, apiPrefix) +func addPrefixToKVPath(p, mountPath, apiPrefix string) string { + if p == mountPath || p == strings.TrimSuffix(mountPath, "/") { + return path.Join(mountPath, apiPrefix) } - pathSuffix := strings.TrimPrefix(path, mountPath) + tp := strings.TrimPrefix(p, mountPath) for { // If the entire mountPath is included in the path, we are done - if pathSuffix != path { + if tp != p { break } // Trim the parts of the mountPath that are not included in the @@ -147,16 +140,10 @@ func addPrefixToKVPath(path, mountPath, apiPrefix string, skipIfExists bool) str break } mountPath = strings.TrimSuffix(partialMountPath[1], "/") - pathSuffix = strings.TrimPrefix(pathSuffix, mountPath) - } - - if skipIfExists { - if strings.HasPrefix(pathSuffix, apiPrefix) || strings.HasPrefix(pathSuffix, "/"+apiPrefix) { - return paths.Join(mountPath, pathSuffix) - } + tp = strings.TrimPrefix(tp, mountPath) } - return paths.Join(mountPath, apiPrefix, pathSuffix) + return path.Join(mountPath, apiPrefix, tp) } func getHeaderForMap(header string, data map[string]interface{}) string { @@ -205,65 +192,3 @@ func padEqualSigns(header string, totalLen int) string { return fmt.Sprintf("%s %s %s", strings.Repeat("=", equalSigns/2), header, strings.Repeat("=", equalSigns/2)) } - -// walkSecretsTree dfs-traverses the secrets tree rooted at the given path -// and calls the `visit` functor for each of the directory and leaf paths. 
-// Note: for kv-v2, a "metadata" path is expected and "metadata" paths will be -// returned in the visit functor. -func walkSecretsTree(ctx context.Context, client *api.Client, path string, visit func(path string, directory bool) error) error { - resp, err := client.Logical().ListWithContext(ctx, path) - if err != nil { - return fmt.Errorf("could not list %q path: %w", path, err) - } - - if resp == nil || resp.Data == nil { - return fmt.Errorf("no value found at %q: %w", path, err) - } - - keysRaw, ok := resp.Data["keys"] - if !ok { - return fmt.Errorf("unexpected list response at %q", path) - } - - keysRawSlice, ok := keysRaw.([]interface{}) - if !ok { - return fmt.Errorf("unexpected list response type %T at %q", keysRaw, path) - } - - keys := make([]string, 0, len(keysRawSlice)) - - for _, keyRaw := range keysRawSlice { - key, ok := keyRaw.(string) - if !ok { - return fmt.Errorf("unexpected key type %T at %q", keyRaw, path) - } - keys = append(keys, key) - } - - // sort the keys for a deterministic output - sort.Strings(keys) - - for _, key := range keys { - // the keys are relative to the current path: combine them - child := paths.Join(path, key) - - if strings.HasSuffix(key, "/") { - // visit the directory - if err := visit(child, true); err != nil { - return err - } - - // this is not a leaf node: we need to go deeper... 
- if err := walkSecretsTree(ctx, client, child, visit); err != nil { - return err - } - } else { - // this is a leaf node: add it to the list - if err := visit(child, false); err != nil { - return err - } - } - } - - return nil -} diff --git a/command/kv_helpers_test.go b/command/kv_helpers_test.go deleted file mode 100644 index ca5f7e8c5ac64..0000000000000 --- a/command/kv_helpers_test.go +++ /dev/null @@ -1,272 +0,0 @@ -package command - -import ( - "context" - "reflect" - "testing" - "time" - - "github.com/hashicorp/vault/api" -) - -// TestAddPrefixToKVPath tests the addPrefixToKVPath helper function -func TestAddPrefixToKVPath(t *testing.T) { - cases := map[string]struct { - path string - mountPath string - apiPrefix string - skipIfExists bool - expected string - }{ - "simple": { - path: "kv-v2/foo", - mountPath: "kv-v2/", - apiPrefix: "data", - skipIfExists: false, - expected: "kv-v2/data/foo", - }, - - "multi-part": { - path: "my/kv-v2/mount/path/foo/bar/baz", - mountPath: "my/kv-v2/mount/path", - apiPrefix: "metadata", - skipIfExists: false, - expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", - }, - - "with-namespace": { - path: "my/kv-v2/mount/path/foo/bar/baz", - mountPath: "my/ns1/my/kv-v2/mount/path", - apiPrefix: "metadata", - skipIfExists: false, - expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", - }, - - "skip-if-exists-true": { - path: "kv-v2/data/foo", - mountPath: "kv-v2/", - apiPrefix: "data", - skipIfExists: true, - expected: "kv-v2/data/foo", - }, - - "skip-if-exists-false": { - path: "kv-v2/data/foo", - mountPath: "kv-v2", - apiPrefix: "data", - skipIfExists: false, - expected: "kv-v2/data/data/foo", - }, - - "skip-if-exists-with-namespace": { - path: "my/kv-v2/mount/path/metadata/foo/bar/baz", - mountPath: "my/ns1/my/kv-v2/mount/path", - apiPrefix: "metadata", - skipIfExists: true, - expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", - }, - } - - for name, tc := range cases { - name, tc := name, tc - t.Run(name, func(t *testing.T) 
{ - t.Parallel() - - actual := addPrefixToKVPath( - tc.path, - tc.mountPath, - tc.apiPrefix, - tc.skipIfExists, - ) - - if tc.expected != actual { - t.Fatalf("unexpected output; want: %v, got: %v", tc.expected, actual) - } - }) - } -} - -// TestWalkSecretsTree tests the walkSecretsTree helper function -func TestWalkSecretsTree(t *testing.T) { - // test setup - client, closer := testVaultServer(t) - defer closer() - - // enable kv-v1 backend - if err := client.Sys().Mount("kv-v1/", &api.MountInput{ - Type: "kv-v1", - }); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - // enable kv-v2 backend - if err := client.Sys().Mount("kv-v2/", &api.MountInput{ - Type: "kv-v2", - }); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) - defer cancelContextFunc() - - // populate secrets - for _, path := range []string{ - "foo", - "app-1/foo", - "app-1/bar", - "app-1/nested/x/y/z", - "app-1/nested/x/y", - "app-1/nested/bar", - } { - if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{ - "password": "Hashi123", - }); err != nil { - t.Fatal(err) - } - - if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{ - "password": "Hashi123", - }); err != nil { - t.Fatal(err) - } - } - - type treePath struct { - path string - directory bool - } - - cases := map[string]struct { - path string - expected []treePath - expectedError bool - }{ - "kv-v1-simple": { - path: "kv-v1/app-1/nested/x/y", - expected: []treePath{ - {path: "kv-v1/app-1/nested/x/y/z", directory: false}, - }, - expectedError: false, - }, - - "kv-v2-simple": { - path: "kv-v2/metadata/app-1/nested/x/y", - expected: []treePath{ - {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, - }, - expectedError: false, - }, - - "kv-v1-nested": { - path: "kv-v1/app-1/nested/", - expected: []treePath{ - {path: "kv-v1/app-1/nested/bar", directory: false}, - {path: "kv-v1/app-1/nested/x", 
directory: true}, - {path: "kv-v1/app-1/nested/x/y", directory: false}, - {path: "kv-v1/app-1/nested/x/y", directory: true}, - {path: "kv-v1/app-1/nested/x/y/z", directory: false}, - }, - expectedError: false, - }, - - "kv-v2-nested": { - path: "kv-v2/metadata/app-1/nested/", - expected: []treePath{ - {path: "kv-v2/metadata/app-1/nested/bar", directory: false}, - {path: "kv-v2/metadata/app-1/nested/x", directory: true}, - {path: "kv-v2/metadata/app-1/nested/x/y", directory: false}, - {path: "kv-v2/metadata/app-1/nested/x/y", directory: true}, - {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, - }, - expectedError: false, - }, - - "kv-v1-all": { - path: "kv-v1", - expected: []treePath{ - {path: "kv-v1/app-1", directory: true}, - {path: "kv-v1/app-1/bar", directory: false}, - {path: "kv-v1/app-1/foo", directory: false}, - {path: "kv-v1/app-1/nested", directory: true}, - {path: "kv-v1/app-1/nested/bar", directory: false}, - {path: "kv-v1/app-1/nested/x", directory: true}, - {path: "kv-v1/app-1/nested/x/y", directory: false}, - {path: "kv-v1/app-1/nested/x/y", directory: true}, - {path: "kv-v1/app-1/nested/x/y/z", directory: false}, - {path: "kv-v1/foo", directory: false}, - }, - expectedError: false, - }, - - "kv-v2-all": { - path: "kv-v2/metadata", - expected: []treePath{ - {path: "kv-v2/metadata/app-1", directory: true}, - {path: "kv-v2/metadata/app-1/bar", directory: false}, - {path: "kv-v2/metadata/app-1/foo", directory: false}, - {path: "kv-v2/metadata/app-1/nested", directory: true}, - {path: "kv-v2/metadata/app-1/nested/bar", directory: false}, - {path: "kv-v2/metadata/app-1/nested/x", directory: true}, - {path: "kv-v2/metadata/app-1/nested/x/y", directory: false}, - {path: "kv-v2/metadata/app-1/nested/x/y", directory: true}, - {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, - {path: "kv-v2/metadata/foo", directory: false}, - }, - expectedError: false, - }, - - "kv-v1-not-found": { - path: "kv-v1/does/not/exist", - expected: nil, - 
expectedError: true, - }, - - "kv-v2-not-found": { - path: "kv-v2/metadata/does/not/exist", - expected: nil, - expectedError: true, - }, - - "kv-v1-not-listable-leaf-node": { - path: "kv-v1/foo", - expected: nil, - expectedError: true, - }, - - "kv-v2-not-listable-leaf-node": { - path: "kv-v2/metadata/foo", - expected: nil, - expectedError: true, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - var descendants []treePath - - err := walkSecretsTree(ctx, client, tc.path, func(path string, directory bool) error { - descendants = append(descendants, treePath{ - path: path, - directory: directory, - }) - return nil - }) - - if tc.expectedError { - if err == nil { - t.Fatal("an error was expected but the test succeeded") - } - } else { - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(tc.expected, descendants) { - t.Fatalf("unexpected list output; want: %v, got: %v", tc.expected, descendants) - } - } - }) - } -} diff --git a/command/kv_list.go b/command/kv_list.go index 25ad4d2a7058b..b6b665c6f55ed 100644 --- a/command/kv_list.go +++ b/command/kv_list.go @@ -1,11 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( "fmt" - "path" "strings" "github.com/mitchellh/cli" @@ -19,7 +15,6 @@ var ( type KVListCommand struct { *BaseCommand - flagMount string } func (c *KVListCommand) Synopsis() string { @@ -45,23 +40,7 @@ Usage: vault kv list [options] PATH } func (c *KVListCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - - // Common Options - f := set.NewFlagSet("Common Options") - - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. 
If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /data/ automatically appended between KV - v2 secrets.`, - }) - - return set + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) } func (c *KVListCommand) AutocompleteArgs() complete.Predictor { @@ -83,11 +62,8 @@ func (c *KVListCommand) Run(args []string) int { args = f.Args() switch { case len(args) < 1: - if c.flagMount == "" { - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - } - args = []string{""} + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 case len(args) > 1: c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) return 1 @@ -99,56 +75,31 @@ func (c *KVListCommand) Run(args []string) int { return 2 } - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - partialPath string - v2 bool - ) - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } + // Append trailing slash + path := args[0] + if !strings.HasSuffix(path, "/") { + path += "/" + } - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. 
"secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } + // Sanitize path + path = sanitizePath(path) + mountPath, v2, err := isKVv2(path, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 } - // Add /metadata to v2 paths only - var fullPath string if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "metadata", false) - } else { - // v1 - if mountFlagSyntax { - fullPath = path.Join(mountPath, partialPath) - } else { - fullPath = partialPath + path = addPrefixToKVPath(path, mountPath, "metadata") + if err != nil { + c.UI.Error(err.Error()) + return 2 } } - secret, err := client.Logical().List(fullPath) + secret, err := client.Logical().List(path) if err != nil { - c.UI.Error(fmt.Sprintf("Error listing %s: %s", fullPath, err)) + c.UI.Error(fmt.Sprintf("Error listing %s: %s", path, err)) return 2 } @@ -166,12 +117,12 @@ func (c *KVListCommand) Run(args []string) int { } if secret == nil || secret.Data == nil { - c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) + c.UI.Error(fmt.Sprintf("No value found at %s", path)) return 2 } if !ok { - c.UI.Error(fmt.Sprintf("No entries found at %s", fullPath)) + c.UI.Error(fmt.Sprintf("No entries found at %s", path)) return 2 } diff --git a/command/kv_metadata.go b/command/kv_metadata.go index 4350311aff326..c4ab37910555a 100644 --- a/command/kv_metadata.go +++ b/command/kv_metadata.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/kv_metadata_delete.go b/command/kv_metadata_delete.go index 6217506f23b07..cff16f21c6fe2 100644 --- a/command/kv_metadata_delete.go +++ b/command/kv_metadata_delete.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -138,7 +135,7 @@ func (c *KVMetadataDeleteCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") if secret, err := client.Logical().Delete(fullPath); err != nil { c.UI.Error(fmt.Sprintf("Error deleting %s: %s", fullPath, err)) if secret != nil { diff --git a/command/kv_metadata_get.go b/command/kv_metadata_get.go index 8d17210741f94..8920340752d5a 100644 --- a/command/kv_metadata_get.go +++ b/command/kv_metadata_get.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -140,7 +137,7 @@ func (c *KVMetadataGetCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") secret, err := client.Logical().Read(fullPath) if err != nil { c.UI.Error(fmt.Sprintf("Error reading %s: %s", fullPath, err)) diff --git a/command/kv_metadata_patch.go b/command/kv_metadata_patch.go index 60d1a3e8b20fe..3a51d8db12154 100644 --- a/command/kv_metadata_patch.go +++ b/command/kv_metadata_patch.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -23,13 +20,12 @@ var ( type KVMetadataPatchCommand struct { *BaseCommand - flagMaxVersions int - flagCASRequired BoolPtr - flagDeleteVersionAfter time.Duration - flagCustomMetadata map[string]string - flagRemoveCustomMetadata []string - flagMount string - testStdin io.Reader // for tests + flagMaxVersions int + flagCASRequired BoolPtr + flagDeleteVersionAfter time.Duration + flagCustomMetadata map[string]string + flagMount string + testStdin io.Reader // for tests } func (c *KVMetadataPatchCommand) Synopsis() string { @@ -38,7 +34,7 @@ func (c *KVMetadataPatchCommand) Synopsis() string { func (c *KVMetadataPatchCommand) Help() string { helpText := ` -Usage: vault kv metadata patch [options] KEY +Usage: vault metadata kv patch [options] KEY This command can be used to create a blank key in the key-value store or to update key configuration for a specified key. @@ -69,10 +65,6 @@ Usage: vault kv metadata patch [options] KEY $ vault kv metadata patch -mount=secret -custom-metadata=foo=abc -custom-metadata=bar=123 foo - To remove custom meta data from the corresponding path in the key-value store, kv metadata patch can be used. - - $ vault kv metadata patch -mount=secret -remove-custom-metadata=bar foo - Additional flags and more advanced use cases are detailed below. ` + c.Flags().Help() @@ -119,13 +111,6 @@ func (c *KVMetadataPatchCommand) Flags() *FlagSets { This can be specified multiple times to add multiple pieces of metadata.`, }) - f.StringSliceVar(&StringSliceVar{ - Name: "remove-custom-metadata", - Target: &c.flagRemoveCustomMetadata, - Default: []string{}, - Usage: "Key to remove from custom metadata. 
To specify multiple values, specify this flag multiple times.", - }) - f.StringVar(&StringVar{ Name: "mount", Target: &c.flagMount, @@ -211,9 +196,9 @@ func (c *KVMetadataPatchCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") - data := make(map[string]interface{}, 0) + data := map[string]interface{}{} if c.flagMaxVersions >= 0 { data["max_versions"] = c.flagMaxVersions @@ -227,19 +212,10 @@ func (c *KVMetadataPatchCommand) Run(args []string) int { data["delete_version_after"] = c.flagDeleteVersionAfter.String() } - customMetadata := make(map[string]interface{}) - - for key, value := range c.flagCustomMetadata { - customMetadata[key] = value - } - - for _, key := range c.flagRemoveCustomMetadata { - // A null in a JSON merge patch payload will remove the associated key - customMetadata[key] = nil + if len(c.flagCustomMetadata) > 0 { + data["custom_metadata"] = c.flagCustomMetadata } - data["custom_metadata"] = customMetadata - secret, err := client.Logical().JSONMergePatch(context.Background(), fullPath, data) if err != nil { c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", fullPath, err)) diff --git a/command/kv_metadata_patch_test.go b/command/kv_metadata_patch_test.go index 58f4f91523321..40b74dc8d9ee5 100644 --- a/command/kv_metadata_patch_test.go +++ b/command/kv_metadata_patch_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -125,29 +122,6 @@ func TestKvMetadataPatchCommand_Flags(t *testing.T) { }, }, }, - { - "remove-custom_metadata", - []string{"-custom-metadata=baz=ghi", "-remove-custom-metadata=foo"}, - "Success!", - 0, - map[string]interface{}{ - "custom_metadata": map[string]interface{}{ - "bar": "def", - "baz": "ghi", - }, - }, - }, - { - "remove-custom_metadata-multiple", - []string{"-custom-metadata=baz=ghi", "-remove-custom-metadata=foo", "-remove-custom-metadata=bar"}, - "Success!", - 0, - map[string]interface{}{ - "custom_metadata": map[string]interface{}{ - "baz": "ghi", - }, - }, - }, { "delete_version_after_success", []string{"-delete-version-after=5s"}, diff --git a/command/kv_metadata_put.go b/command/kv_metadata_put.go index 9ec43e0baec18..7494204f436b8 100644 --- a/command/kv_metadata_put.go +++ b/command/kv_metadata_put.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -36,7 +33,7 @@ func (c *KVMetadataPutCommand) Synopsis() string { func (c *KVMetadataPutCommand) Help() string { helpText := ` -Usage: vault kv metadata put [options] KEY +Usage: vault metadata kv put [options] KEY This command can be used to create a blank key in the key-value store or to update key configuration for a specified key. @@ -199,7 +196,7 @@ func (c *KVMetadataPutCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") data := map[string]interface{}{} if c.flagMaxVersions >= 0 { diff --git a/command/kv_metadata_put_test.go b/command/kv_metadata_put_test.go index 008ded9fdb8d8..a952802cc4699 100644 --- a/command/kv_metadata_put_test.go +++ b/command/kv_metadata_put_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/kv_patch.go b/command/kv_patch.go index 3f5080d4de70a..37fca84510ad9 100644 --- a/command/kv_patch.go +++ b/command/kv_patch.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -24,11 +21,10 @@ var ( type KVPatchCommand struct { *BaseCommand - flagCAS int - flagMethod string - flagMount string - testStdin io.Reader // for tests - flagRemoveData []string + flagCAS int + flagMethod string + flagMount string + testStdin io.Reader // for tests } func (c *KVPatchCommand) Synopsis() string { @@ -80,10 +76,6 @@ Usage: vault kv patch [options] KEY [DATA] $ vault kv patch -mount=secret -method=rw foo bar=baz - To remove data from the corresponding path in the key-value store, kv patch can be used. - - $ vault kv patch -mount=secret -remove-data=bar foo - Additional flags and more advanced use cases are detailed below. ` + c.Flags().Help() @@ -125,13 +117,6 @@ func (c *KVPatchCommand) Flags() *FlagSets { v2 secrets.`, }) - f.StringSliceVar(&StringSliceVar{ - Name: "remove-data", - Target: &c.flagRemoveData, - Default: []string{}, - Usage: "Key to remove from data. 
To specify multiple values, specify this flag multiple times.", - }) - return set } @@ -162,7 +147,7 @@ func (c *KVPatchCommand) Run(args []string) int { case len(args) < 1: c.UI.Error(fmt.Sprintf("Not enough arguments (expected >1, got %d)", len(args))) return 1 - case len(c.flagRemoveData) == 0 && len(args) == 1: + case len(args) == 1: c.UI.Error("Must supply data") return 1 } @@ -220,22 +205,12 @@ func (c *KVPatchCommand) Run(args []string) int { return 2 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "data", false) + fullPath := addPrefixToKVPath(partialPath, mountPath, "data") if err != nil { c.UI.Error(err.Error()) return 2 } - // collecting data to be removed - if newData == nil { - newData = make(map[string]interface{}) - } - - for _, key := range c.flagRemoveData { - // A null in a JSON merge patch payload will remove the associated key - newData[key] = nil - } - // Check the method and behave accordingly var secret *api.Secret var code int diff --git a/command/kv_put.go b/command/kv_put.go index 77c0c6a1129e2..5cc7b6fbc67b8 100644 --- a/command/kv_put.go +++ b/command/kv_put.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -181,7 +178,7 @@ func (c *KVPutCommand) Run(args []string) int { // Add /data to v2 paths only var fullPath string if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) + fullPath = addPrefixToKVPath(partialPath, mountPath, "data") data = map[string]interface{}{ "data": data, "options": map[string]interface{}{}, diff --git a/command/kv_rollback.go b/command/kv_rollback.go index c54d7bc2f6352..0d782619a8320 100644 --- a/command/kv_rollback.go +++ b/command/kv_rollback.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -164,7 +161,7 @@ func (c *KVRollbackCommand) Run(args []string) int { return 2 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "data", false) + fullPath := addPrefixToKVPath(partialPath, mountPath, "data") if err != nil { c.UI.Error(err.Error()) return 2 diff --git a/command/kv_test.go b/command/kv_test.go index 6564208ed3353..162113b8b5a95 100644 --- a/command/kv_test.go +++ b/command/kv_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -593,131 +590,6 @@ func TestKVGetCommand(t *testing.T) { }) } -func testKVListCommand(tb testing.TB) (*cli.MockUi, *KVListCommand) { - tb.Helper() - ui := cli.NewMockUi() - cmd := &KVListCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } - - return ui, cmd -} - -// TestKVListCommand runs tests for `vault kv list` -func TestKVListCommand(t *testing.T) { - testCases := []struct { - name string - args []string - outStrings []string - code int - }{ - { - name: "default", - args: []string{"kv/my-prefix"}, - outStrings: []string{"secret-0", "secret-1", "secret-2"}, - code: 0, - }, - { - name: "not_enough_args", - args: []string{}, - outStrings: []string{"Not enough arguments"}, - code: 1, - }, - { - name: "v2_default_with_mount", - args: []string{"-mount", "kv", "my-prefix"}, - outStrings: []string{"secret-0", "secret-1", "secret-2"}, - code: 0, - }, - { - name: "v1_default_with_mount", - args: []string{"kv/my-prefix"}, - outStrings: []string{"secret-0", "secret-1", "secret-2"}, - code: 0, - }, - { - name: "v2_not_found", - args: []string{"kv/nope/not/once/never"}, - outStrings: []string{"No value found at kv/metadata/nope/not/once/never"}, - code: 2, - }, - { - name: "v1_mount_only", - args: []string{"kv"}, - outStrings: []string{"my-prefix"}, - code: 0, - }, - { - name: "v2_mount_only", - args: []string{"-mount", "kv"}, - outStrings: []string{"my-prefix"}, - code: 0, - }, - { - // 
this is behavior that should be tested - // `kv` here is an explicit mount - // `my-prefix` is not - // the current kv code will ignore `my-prefix` - name: "ignore_multi_part_mounts", - args: []string{"-mount", "kv/my-prefix"}, - outStrings: []string{"my-prefix"}, - code: 0, - }, - } - - t.Run("validations", func(t *testing.T) { - t.Parallel() - - for _, testCase := range testCases { - testCase := testCase - - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - - // test setup - client, closer := testVaultServer(t) - defer closer() - - // enable kv-v2 backend - if err := client.Sys().Mount("kv/", &api.MountInput{ - Type: "kv-v2", - }); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - ctx := context.Background() - for i := 0; i < 3; i++ { - path := fmt.Sprintf("my-prefix/secret-%d", i) - _, err := client.KVv2("kv/").Put(ctx, path, map[string]interface{}{ - "foo": "bar", - }) - if err != nil { - t.Fatal(err) - } - } - - ui, cmd := testKVListCommand(t) - cmd.client = client - - code := cmd.Run(testCase.args) - if code != testCase.code { - t.Errorf("expected %d to be %d", code, testCase.code) - } - - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - for _, str := range testCase.outStrings { - if !strings.Contains(combined, str) { - t.Errorf("expected %q to contain %q", combined, str) - } - } - }) - } - }) -} - func testKVMetadataGetCommand(tb testing.TB) (*cli.MockUi, *KVMetadataGetCommand) { tb.Helper() @@ -898,7 +770,6 @@ func TestKVPatchCommand_ArgValidation(t *testing.T) { } for _, tc := range cases { - tc := tc // capture range variable t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/command/kv_undelete.go b/command/kv_undelete.go index 25de58835f0bd..90ea608a73165 100644 --- a/command/kv_undelete.go +++ b/command/kv_undelete.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -150,7 +147,7 @@ func (c *KVUndeleteCommand) Run(args []string) int { return 1 } - undeletePath := addPrefixToKVPath(partialPath, mountPath, "undelete", false) + undeletePath := addPrefixToKVPath(partialPath, mountPath, "undelete") data := map[string]interface{}{ "versions": kvParseVersionsFlags(c.flagVersions), } diff --git a/command/lease.go b/command/lease.go index 29ef79fc2d8cd..76f6cc174c41b 100644 --- a/command/lease.go +++ b/command/lease.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/lease_lookup.go b/command/lease_lookup.go index ef53ce5286353..c72c6a174f2ce 100644 --- a/command/lease_lookup.go +++ b/command/lease_lookup.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/lease_lookup_test.go b/command/lease_lookup_test.go index 536c29c58eeeb..4de63200f5ce7 100644 --- a/command/lease_lookup_test.go +++ b/command/lease_lookup_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/lease_renew.go b/command/lease_renew.go index aad41d66b7952..13eb95ed00934 100644 --- a/command/lease_renew.go +++ b/command/lease_renew.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/lease_renew_test.go b/command/lease_renew_test.go index c24b812b911ac..aa3b32d0d8b34 100644 --- a/command/lease_renew_test.go +++ b/command/lease_renew_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/lease_revoke.go b/command/lease_revoke.go index 5efd5ecf198b5..1fc90eff7cb50 100644 --- a/command/lease_revoke.go +++ b/command/lease_revoke.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/lease_revoke_test.go b/command/lease_revoke_test.go index 261041e2331a8..1aa58c38ac765 100644 --- a/command/lease_revoke_test.go +++ b/command/lease_revoke_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/list.go b/command/list.go index 028f0d391c6c0..9831b6633c8c2 100644 --- a/command/list.go +++ b/command/list.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -81,7 +78,13 @@ func (c *ListCommand) Run(args []string) int { return 2 } - path := sanitizePath(args[0]) + // Append trailing slash + path := args[0] + if !strings.HasSuffix(path, "/") { + path += "/" + } + + path = sanitizePath(path) secret, err := client.Logical().List(path) if err != nil { c.UI.Error(fmt.Sprintf("Error listing %s: %s", path, err)) diff --git a/command/list_test.go b/command/list_test.go index 070184bd91b4a..b1b6680507f16 100644 --- a/command/list_test.go +++ b/command/list_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/log_flags.go b/command/log_flags.go deleted file mode 100644 index 5213d06377ef7..0000000000000 --- a/command/log_flags.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "flag" - "os" - "strconv" - - "github.com/hashicorp/vault/internalshared/configutil" - "github.com/posener/complete" -) - -// logFlags are the 'log' related flags that can be shared across commands. 
-type logFlags struct { - flagCombineLogs bool - flagLogLevel string - flagLogFormat string - flagLogFile string - flagLogRotateBytes int - flagLogRotateDuration string - flagLogRotateMaxFiles int -} - -// valuesProvider has the intention of providing a way to supply a func with a -// way to retrieve values for flags and environment variables without having to -// directly call a specific implementation. -// The reasoning for its existence is to facilitate testing. -type valuesProvider struct { - flagProvider func(string) (flag.Value, bool) - envVarProvider func(string) (string, bool) -} - -// addLogFlags will add the set of 'log' related flags to a flag set. -func (f *FlagSet) addLogFlags(l *logFlags) { - f.BoolVar(&BoolVar{ - Name: flagNameCombineLogs, - Target: &l.flagCombineLogs, - Default: false, - Hidden: true, - }) - - f.StringVar(&StringVar{ - Name: flagNameLogLevel, - Target: &l.flagLogLevel, - Default: notSetValue, - EnvVar: EnvVaultLogLevel, - Completion: complete.PredictSet("trace", "debug", "info", "warn", "error"), - Usage: "Log verbosity level. Supported values (in order of detail) are " + - "\"trace\", \"debug\", \"info\", \"warn\", and \"error\".", - }) - - f.StringVar(&StringVar{ - Name: flagNameLogFormat, - Target: &l.flagLogFormat, - Default: notSetValue, - EnvVar: EnvVaultLogFormat, - Completion: complete.PredictSet("standard", "json"), - Usage: `Log format. Supported values are "standard" and "json".`, - }) - - f.StringVar(&StringVar{ - Name: flagNameLogFile, - Target: &l.flagLogFile, - Usage: "Path to the log file that Vault should use for logging", - }) - - f.IntVar(&IntVar{ - Name: flagNameLogRotateBytes, - Target: &l.flagLogRotateBytes, - Usage: "Number of bytes that should be written to a log before it needs to be rotated. 
" + - "Unless specified, there is no limit to the number of bytes that can be written to a log file", - }) - - f.StringVar(&StringVar{ - Name: flagNameLogRotateDuration, - Target: &l.flagLogRotateDuration, - Usage: "The maximum duration a log should be written to before it needs to be rotated. " + - "Must be a duration value such as 30s", - }) - - f.IntVar(&IntVar{ - Name: flagNameLogRotateMaxFiles, - Target: &l.flagLogRotateMaxFiles, - Usage: "The maximum number of older log file archives to keep", - }) -} - -// envVarValue attempts to get a named value from the environment variables. -// The value will be returned as a string along with a boolean value indiciating -// to the caller whether the named env var existed. -func envVarValue(key string) (string, bool) { - if key == "" { - return "", false - } - return os.LookupEnv(key) -} - -// flagValue attempts to find the named flag in a set of FlagSets. -// The flag.Value is returned if it was specified, and the boolean value indicates -// to the caller if the flag was specified by the end user. -func (f *FlagSets) flagValue(flagName string) (flag.Value, bool) { - var result flag.Value - var isFlagSpecified bool - - if f != nil { - f.Visit(func(fl *flag.Flag) { - if fl.Name == flagName { - result = fl.Value - isFlagSpecified = true - } - }) - } - - return result, isFlagSpecified -} - -// overrideValue uses the provided keys to check CLI flags and environment -// variables for values that may be used to override any specified configuration. -func (p *valuesProvider) overrideValue(flagKey, envVarKey string) (string, bool) { - var result string - found := true - - flg, flgFound := p.flagProvider(flagKey) - env, envFound := p.envVarProvider(envVarKey) - - switch { - case flgFound: - result = flg.String() - case envFound: - result = env - default: - found = false - } - - return result, found -} - -// applyLogConfigOverrides will accept a shared config and specifically attempt to update the 'log' related config keys. 
-// For each 'log' key, we aggregate file config, env vars and CLI flags to select the one with the highest precedence. -// This method mutates the config object passed into it. -func (f *FlagSets) applyLogConfigOverrides(config *configutil.SharedConfig) { - p := &valuesProvider{ - flagProvider: f.flagValue, - envVarProvider: envVarValue, - } - - // Update log level - if val, found := p.overrideValue(flagNameLogLevel, EnvVaultLogLevel); found { - config.LogLevel = val - } - - // Update log format - if val, found := p.overrideValue(flagNameLogFormat, EnvVaultLogFormat); found { - config.LogFormat = val - } - - // Update log file name - if val, found := p.overrideValue(flagNameLogFile, ""); found { - config.LogFile = val - } - - // Update log rotation duration - if val, found := p.overrideValue(flagNameLogRotateDuration, ""); found { - config.LogRotateDuration = val - } - - // Update log max files - if val, found := p.overrideValue(flagNameLogRotateMaxFiles, ""); found { - config.LogRotateMaxFiles, _ = strconv.Atoi(val) - } - - // Update log rotation max bytes - if val, found := p.overrideValue(flagNameLogRotateBytes, ""); found { - config.LogRotateBytes, _ = strconv.Atoi(val) - } -} diff --git a/command/log_flags_test.go b/command/log_flags_test.go deleted file mode 100644 index 38bfa52e95a39..0000000000000 --- a/command/log_flags_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "flag" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestLogFlags_ValuesProvider(t *testing.T) { - cases := map[string]struct { - flagKey string - envVarKey string - wantValue string - wantFound bool - }{ - "flag-missing": { - flagKey: "invalid", - envVarKey: "valid-env-var", - wantValue: "envVarValue", - wantFound: true, - }, - "envVar-missing": { - flagKey: "valid-flag", - envVarKey: "invalid", - wantValue: "flagValue", - wantFound: true, - }, - "all-present": { - flagKey: "valid-flag", - envVarKey: "valid-env-var", - wantValue: "flagValue", - wantFound: true, - }, - "all-missing": { - flagKey: "invalid", - envVarKey: "invalid", - wantValue: "", - wantFound: false, - }, - } - - // Sneaky little fake providers - flagFaker := func(key string) (flag.Value, bool) { - var result fakeFlag - var found bool - - if key == "valid-flag" { - result.Set("flagValue") - found = true - } - - return &result, found - } - - envFaker := func(key string) (string, bool) { - var found bool - var result string - - if key == "valid-env-var" { - result = "envVarValue" - found = true - } - - return result, found - } - - vp := valuesProvider{ - flagProvider: flagFaker, - envVarProvider: envFaker, - } - - for name, tc := range cases { - val, found := vp.overrideValue(tc.flagKey, tc.envVarKey) - assert.Equal(t, tc.wantFound, found, name) - assert.Equal(t, tc.wantValue, val, name) - } -} - -type fakeFlag struct { - value string -} - -func (v *fakeFlag) String() string { - return v.value -} - -func (v *fakeFlag) Set(raw string) error { - v.value = raw - return nil -} diff --git a/command/login.go b/command/login.go index c8bc23254c29a..30352abbb4e59 100644 --- a/command/login.go +++ b/command/login.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/login_test.go b/command/login_test.go index 3d41d8e881470..56ed790f3427f 100644 --- a/command/login_test.go +++ b/command/login_test.go @@ -1,14 +1,8 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( - "context" - "regexp" "strings" "testing" - "time" "github.com/mitchellh/cli" @@ -43,445 +37,414 @@ func testLoginCommand(tb testing.TB) (*cli.MockUi, *LoginCommand) { } } -func TestCustomPath(t *testing.T) { +func TestLoginCommand_Run(t *testing.T) { t.Parallel() - client, closer := testVaultServer(t) - defer closer() + t.Run("custom_path", func(t *testing.T) { + t.Parallel() - if err := client.Sys().EnableAuth("my-auth", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/my-auth/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + client, closer := testVaultServer(t) + defer closer() - ui, cmd := testLoginCommand(t) - cmd.client = client - - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - - // Emulate an unknown token format present in ~/.vault-token, for example - client.SetToken("a.a") + if err := client.Sys().EnableAuth("my-auth", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/my-auth/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } - code := cmd.Run([]string{ - "-method", "userpass", - "-path", "my-auth", - "username=test", - "password=test", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + ui, cmd := testLoginCommand(t) + cmd.client = client - expected := "Success! You are now authenticated." 
- combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to be %q", combined, expected) - } + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } - storedToken, err := tokenHelper.Get() - if err != nil { - t.Fatal(err) - } + // Emulate an unknown token format present in ~/.vault-token, for example + client.SetToken("a.a") - if l, exp := len(storedToken), minTokenLengthExternal+vault.TokenPrefixLength; l < exp { - t.Errorf("expected token to be %d characters, was %d: %q", exp, l, storedToken) - } -} + code := cmd.Run([]string{ + "-method", "userpass", + "-path", "my-auth", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } -// Do not persist the token to the token helper -func TestNoStore(t *testing.T) { - t.Parallel() + expected := "Success! You are now authenticated." + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to be %q", combined, expected) + } - client, closer := testVaultServer(t) - defer closer() + storedToken, err := tokenHelper.Get() + if err != nil { + t.Fatal(err) + } - secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ - Policies: []string{"default"}, - TTL: "30m", + if l, exp := len(storedToken), minTokenLengthExternal+vault.TokenPrefixLength; l < exp { + t.Errorf("expected token to be %d characters, was %d: %q", exp, l, storedToken) + } }) - if err != nil { - t.Fatal(err) - } - token := secret.Auth.ClientToken - _, cmd := testLoginCommand(t) - cmd.client = client + t.Run("no_store", func(t *testing.T) { + t.Parallel() - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } + client, closer := testVaultServer(t) + defer closer() - // Ensure we have no token to start - if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { - 
t.Errorf("expected token helper to be empty: %s: %q", err, storedToken) - } + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + }) + if err != nil { + t.Fatal(err) + } + token := secret.Auth.ClientToken - code := cmd.Run([]string{ - "-no-store", - token, - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + _, cmd := testLoginCommand(t) + cmd.client = client - storedToken, err := tokenHelper.Get() - if err != nil { - t.Fatal(err) - } + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } - if exp := ""; storedToken != exp { - t.Errorf("expected %q to be %q", storedToken, exp) - } -} + // Ensure we have no token to start + if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { + t.Errorf("expected token helper to be empty: %s: %q", err, storedToken) + } -func TestStores(t *testing.T) { - t.Parallel() + code := cmd.Run([]string{ + "-no-store", + token, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - client, closer := testVaultServer(t) - defer closer() + storedToken, err := tokenHelper.Get() + if err != nil { + t.Fatal(err) + } - secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ - Policies: []string{"default"}, - TTL: "30m", + if exp := ""; storedToken != exp { + t.Errorf("expected %q to be %q", storedToken, exp) + } }) - if err != nil { - t.Fatal(err) - } - token := secret.Auth.ClientToken - _, cmd := testLoginCommand(t) - cmd.client = client + t.Run("stores", func(t *testing.T) { + t.Parallel() - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - - code := cmd.Run([]string{ - token, - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + client, closer := testVaultServer(t) + defer closer() - storedToken, err := tokenHelper.Get() - if err != nil { - t.Fatal(err) - } - - if storedToken != token { - 
t.Errorf("expected %q to be %q", storedToken, token) - } -} - -func TestTokenOnly(t *testing.T) { - t.Parallel() + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + }) + if err != nil { + t.Fatal(err) + } + token := secret.Auth.ClientToken - client, closer := testVaultServer(t) - defer closer() + _, cmd := testLoginCommand(t) + cmd.client = client - if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } - ui, cmd := testLoginCommand(t) - cmd.client = client + code := cmd.Run([]string{ + token, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } + storedToken, err := tokenHelper.Get() + if err != nil { + t.Fatal(err) + } - code := cmd.Run([]string{ - "-token-only", - "-method", "userpass", - "username=test", - "password=test", + if storedToken != token { + t.Errorf("expected %q to be %q", storedToken, token) + } }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - // Verify only the token was printed - token := ui.OutputWriter.String() - if l, exp := len(token), minTokenLengthExternal+vault.TokenPrefixLength; l != exp { - t.Errorf("expected token to be %d characters, was %d: %q", exp, l, token) - } + t.Run("token_only", func(t *testing.T) { + t.Parallel() - // Verify the token was not stored - if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { - t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) - } -} + client, closer := testVaultServer(t) + defer closer() -func TestFailureNoStore(t *testing.T) { - t.Parallel() + if err := 
client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testLoginCommand(t) + cmd.client = client - client, closer := testVaultServer(t) - defer closer() + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } - ui, cmd := testLoginCommand(t) - cmd.client = client + code := cmd.Run([]string{ + "-token-only", + "-method", "userpass", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } + // Verify only the token was printed + token := ui.OutputWriter.String() + if l, exp := len(token), minTokenLengthExternal+vault.TokenPrefixLength; l != exp { + t.Errorf("expected token to be %d characters, was %d: %q", exp, l, token) + } - code := cmd.Run([]string{ - "not-a-real-token", + // Verify the token was not stored + if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - expected := "Error authenticating: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } + t.Run("failure_no_store", func(t *testing.T) { + t.Parallel() - if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { - t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) - } -} - -func TestWrapAutoUnwrap(t *testing.T) { - t.Parallel() + client, closer := testVaultServer(t) + defer closer() - client, closer := testVaultServer(t) - defer closer() + ui, cmd := testLoginCommand(t) + cmd.client = client - 
if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } - _, cmd := testLoginCommand(t) - cmd.client = client + code := cmd.Run([]string{ + "not-a-real-token", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - // Set the wrapping ttl to 5s. We can't set this via the flag because we - // override the client object before that particular flag is parsed. - client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) + expected := "Error authenticating: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } - code := cmd.Run([]string{ - "-method", "userpass", - "username=test", - "password=test", + if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - // Unset the wrapping - client.SetWrappingLookupFunc(func(string, string) string { return "" }) + t.Run("wrap_auto_unwrap", func(t *testing.T) { + t.Parallel() - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - token, err := tokenHelper.Get() - if err != nil || token == "" { - t.Fatalf("expected token from helper: %s: %q", err, token) - } - client.SetToken(token) + client, closer := testVaultServer(t) + defer closer() - // Ensure the resulting token is unwrapped - secret, err := client.Auth().Token().LookupSelf() - if err != nil { - t.Error(err) - } - if secret == nil { - t.Fatal("secret was nil") - } + if err := client.Sys().EnableAuth("userpass", "userpass", 
""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } - if secret.WrapInfo != nil { - t.Errorf("expected to be unwrapped: %#v", secret) - } -} + _, cmd := testLoginCommand(t) + cmd.client = client -func TestWrapTokenOnly(t *testing.T) { - t.Parallel() + // Set the wrapping ttl to 5s. We can't set this via the flag because we + // override the client object before that particular flag is parsed. + client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) - client, closer := testVaultServer(t) - defer closer() + code := cmd.Run([]string{ + "-method", "userpass", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + // Unset the wrapping + client.SetWrappingLookupFunc(func(string, string) string { return "" }) - ui, cmd := testLoginCommand(t) - cmd.client = client + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + token, err := tokenHelper.Get() + if err != nil || token == "" { + t.Fatalf("expected token from helper: %s: %q", err, token) + } + client.SetToken(token) - // Set the wrapping ttl to 5s. We can't set this via the flag because we - // override the client object before that particular flag is parsed. 
- client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) + // Ensure the resulting token is unwrapped + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Error(err) + } + if secret == nil { + t.Fatal("secret was nil") + } - code := cmd.Run([]string{ - "-token-only", - "-method", "userpass", - "username=test", - "password=test", + if secret.WrapInfo != nil { + t.Errorf("expected to be unwrapped: %#v", secret) + } }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - // Unset the wrapping - client.SetWrappingLookupFunc(func(string, string) string { return "" }) + t.Run("wrap_token_only", func(t *testing.T) { + t.Parallel() - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - storedToken, err := tokenHelper.Get() - if err != nil || storedToken != "" { - t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) - } + client, closer := testVaultServer(t) + defer closer() - token := strings.TrimSpace(ui.OutputWriter.String()) - if token == "" { - t.Errorf("expected %q to not be %q", token, "") - } + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } - // Ensure the resulting token is, in fact, still wrapped. - client.SetToken(token) - secret, err := client.Logical().Unwrap("") - if err != nil { - t.Error(err) - } - if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { - t.Fatalf("expected secret to have auth: %#v", secret) - } -} + ui, cmd := testLoginCommand(t) + cmd.client = client -func TestWrapNoStore(t *testing.T) { - t.Parallel() + // Set the wrapping ttl to 5s. We can't set this via the flag because we + // override the client object before that particular flag is parsed. 
+ client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) - client, closer := testVaultServer(t) - defer closer() + code := cmd.Run([]string{ + "-token-only", + "-method", "userpass", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + // Unset the wrapping + client.SetWrappingLookupFunc(func(string, string) string { return "" }) - ui, cmd := testLoginCommand(t) - cmd.client = client + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + storedToken, err := tokenHelper.Get() + if err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } - // Set the wrapping ttl to 5s. We can't set this via the flag because we - // override the client object before that particular flag is parsed. - client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) + token := strings.TrimSpace(ui.OutputWriter.String()) + if token == "" { + t.Errorf("expected %q to not be %q", token, "") + } - code := cmd.Run([]string{ - "-no-store", - "-method", "userpass", - "username=test", - "password=test", + // Ensure the resulting token is, in fact, still wrapped. 
+ client.SetToken(token) + secret, err := client.Logical().Unwrap("") + if err != nil { + t.Error(err) + } + if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("expected secret to have auth: %#v", secret) + } }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - // Unset the wrapping - client.SetWrappingLookupFunc(func(string, string) string { return "" }) + t.Run("wrap_no_store", func(t *testing.T) { + t.Parallel() - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - storedToken, err := tokenHelper.Get() - if err != nil || storedToken != "" { - t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) - } + client, closer := testVaultServer(t) + defer closer() - expected := "wrapping_token" - output := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(output, expected) { - t.Errorf("expected %q to contain %q", output, expected) - } -} + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } -func TestCommunicationFailure(t *testing.T) { - t.Parallel() + ui, cmd := testLoginCommand(t) + cmd.client = client - client, closer := testVaultServerBad(t) - defer closer() + // Set the wrapping ttl to 5s. We can't set this via the flag because we + // override the client object before that particular flag is parsed. 
+ client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) - ui, cmd := testLoginCommand(t) - cmd.client = client + code := cmd.Run([]string{ + "-no-store", + "-method", "userpass", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - code := cmd.Run([]string{ - "token", - }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + // Unset the wrapping + client.SetWrappingLookupFunc(func(string, string) string { return "" }) - expected := "Error authenticating: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } -} + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + storedToken, err := tokenHelper.Get() + if err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } -func TestNoTabs(t *testing.T) { - t.Parallel() + expected := "wrapping_token" + output := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(output, expected) { + t.Errorf("expected %q to contain %q", output, expected) + } + }) - _, cmd := testLoginCommand(t) - assertNoTabs(t, cmd) -} + t.Run("login_mfa_single_phase", func(t *testing.T) { + t.Parallel() -func TestLoginMFASinglePhase(t *testing.T) { - t.Parallel() + client, closer := testVaultServer(t) + defer closer() - client, closer := testVaultServer(t) - defer closer() + ui, cmd := testLoginCommand(t) - methodName := "foo" - waitPeriod := 5 - userClient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client, methodName, waitPeriod) - enginePath := testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) + userclient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client) + cmd.client = userclient - runCommand := func(methodIdentifier string) { - // the time required for the totp engine to 
generate a new code - time.Sleep(time.Duration(waitPeriod) * time.Second) + enginePath := testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) totpCode := testhelpers.GetTOTPCodeFromEngine(t, client, enginePath) - ui, cmd := testLoginCommand(t) - cmd.client = userClient + // login command bails early for test clients, so we have to explicitly set this - cmd.client.SetMFACreds([]string{methodIdentifier + ":" + totpCode}) + cmd.client.SetMFACreds([]string{methodID + ":" + totpCode}) code := cmd.Run([]string{ "-method", "userpass", "username=testuser1", @@ -499,118 +462,85 @@ func TestLoginMFASinglePhase(t *testing.T) { if err != nil { t.Fatal(err) } - if storedToken == "" { - t.Fatal("expected non-empty stored token") - } - output := ui.OutputWriter.String() + output = ui.OutputWriter.String() + ui.ErrorWriter.String() + t.Logf("\n%+v", output) if !strings.Contains(output, storedToken) { t.Fatalf("expected stored token: %q, got: %q", storedToken, output) } - } - runCommand(methodID) - runCommand(methodName) -} - -func TestLoginMFATwoPhase(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testLoginCommand(t) - - userclient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client, "", 5) - cmd.client = userclient - - _ = testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) - - // clear the MFA creds just to be sure - cmd.client.SetMFACreds([]string{}) - - code := cmd.Run([]string{ - "-method", "userpass", - "username=testuser1", - "password=testpassword", }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - expected := methodID - output := ui.OutputWriter.String() - if !strings.Contains(output, expected) { - t.Fatalf("expected stored token: %q, got: %q", expected, output) - } + t.Run("login_mfa_two_phase", func(t *testing.T) { + t.Parallel() - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - storedToken, err := 
tokenHelper.Get() - if storedToken != "" { - t.Fatal("expected empty stored token") - } - if err != nil { - t.Fatal(err) - } -} + client, closer := testVaultServer(t) + defer closer() -func TestLoginMFATwoPhaseNonInteractiveMethodName(t *testing.T) { - t.Parallel() + ui, cmd := testLoginCommand(t) - client, closer := testVaultServer(t) - defer closer() + userclient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client) + cmd.client = userclient - ui, cmd := testLoginCommand(t) + _ = testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) - methodName := "foo" - waitPeriod := 5 - userclient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client, methodName, waitPeriod) - cmd.client = userclient + // clear the MFA creds just to be sure + cmd.client.SetMFACreds([]string{}) - engineName := testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) + code := cmd.Run([]string{ + "-method", "userpass", + "username=testuser1", + "password=testpassword", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - // clear the MFA creds just to be sure - cmd.client.SetMFACreds([]string{}) + expected := methodID + output = ui.OutputWriter.String() + ui.ErrorWriter.String() + t.Logf("\n%+v", output) + if !strings.Contains(output, expected) { + t.Fatalf("expected stored token: %q, got: %q", expected, output) + } - code := cmd.Run([]string{ - "-method", "userpass", - "-non-interactive", - "username=testuser1", - "password=testpassword", + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + storedToken, err := tokenHelper.Get() + if storedToken != "" { + t.Fatal("expected empty stored token") + } + if err != nil { + t.Fatal(err) + } }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - output := ui.OutputWriter.String() + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() - reqIdReg := 
regexp.MustCompile(`mfa_request_id\s+([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\s+mfa_constraint`) - reqIDRaw := reqIdReg.FindAllStringSubmatch(output, -1) - if len(reqIDRaw) == 0 || len(reqIDRaw[0]) < 2 { - t.Fatal("failed to MFA request ID from output") - } - mfaReqID := reqIDRaw[0][1] - - validateFunc := func(methodIdentifier string) { - // the time required for the totp engine to generate a new code - time.Sleep(time.Duration(waitPeriod) * time.Second) - totpPasscode1 := "passcode=" + testhelpers.GetTOTPCodeFromEngine(t, client, engineName) - - secret, err := cmd.client.Logical().WriteWithContext(context.Background(), "sys/mfa/validate", map[string]interface{}{ - "mfa_request_id": mfaReqID, - "mfa_payload": map[string][]string{ - methodIdentifier: {totpPasscode1}, - }, + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testLoginCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "token", }) - if err != nil { - t.Fatalf("mfa validation failed: %v", err) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) } - if secret.Auth == nil || secret.Auth.ClientToken == "" { - t.Fatalf("mfa validation did not return a client token") + expected := "Error authenticating: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) } - } + }) - validateFunc(methodName) + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testLoginCommand(t) + assertNoTabs(t, cmd) + }) } diff --git a/command/main.go b/command/main.go index 13fbe2181822d..4a5e8028214e6 100644 --- a/command/main.go +++ b/command/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -220,19 +217,18 @@ func RunCustom(args []string, runOpts *RunOptions) int { return 1 } - commands := initCommands(ui, serverCmdUi, runOpts) + initCommands(ui, serverCmdUi, runOpts) hiddenCommands := []string{"version"} cli := &cli.CLI{ Name: "vault", Args: args, - Commands: commands, + Commands: Commands, HelpFunc: groupedHelpFunc( cli.BasicHelpFunc("vault"), ), - HelpWriter: runOpts.Stdout, - ErrorWriter: runOpts.Stderr, + HelpWriter: runOpts.Stderr, HiddenCommands: hiddenCommands, Autocomplete: true, AutocompleteNoDefaultFlags: true, diff --git a/command/monitor.go b/command/monitor.go index 7545b82da71a3..e6309258973f1 100644 --- a/command/monitor.go +++ b/command/monitor.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/monitor_test.go b/command/monitor_test.go index 0cc722c98d91f..d10547a8c8734 100644 --- a/command/monitor_test.go +++ b/command/monitor_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -9,6 +6,7 @@ import ( "testing" "time" + "github.com/hashicorp/vault/helper/testhelpers" "github.com/mitchellh/cli" ) @@ -74,11 +72,14 @@ func TestMonitorCommand_Run(t *testing.T) { cmd.client = client cmd.ShutdownCh = shutdownCh + stopCh := testhelpers.GenerateDebugLogs(t, client) + go func() { atomic.StoreInt64(&code, int64(cmd.Run(tc.args))) }() <-time.After(3 * time.Second) + stopCh <- struct{}{} close(shutdownCh) if atomic.LoadInt64(&code) != tc.code { @@ -89,6 +90,8 @@ func TestMonitorCommand_Run(t *testing.T) { if !strings.Contains(combined, tc.out) { t.Fatalf("expected %q to contain %q", combined, tc.out) } + + <-stopCh }) } } diff --git a/command/namespace.go b/command/namespace.go index 18bc6e92eb552..702395753da8a 100644 --- a/command/namespace.go +++ b/command/namespace.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/namespace_api_lock.go b/command/namespace_api_lock.go index 57b196992c945..48fec344c7416 100644 --- a/command/namespace_api_lock.go +++ b/command/namespace_api_lock.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/namespace_api_unlock.go b/command/namespace_api_unlock.go index 77e829147afb2..38f4a764d4d52 100644 --- a/command/namespace_api_unlock.go +++ b/command/namespace_api_unlock.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/namespace_create.go b/command/namespace_create.go index 60df834d739a8..7d1f52fa8c9ba 100644 --- a/command/namespace_create.go +++ b/command/namespace_create.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/namespace_delete.go b/command/namespace_delete.go index 5c79c35b1050b..a5d18929368b8 100644 --- a/command/namespace_delete.go +++ b/command/namespace_delete.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/namespace_list.go b/command/namespace_list.go index 6394daedf36c6..2be2a3874df5b 100644 --- a/command/namespace_list.go +++ b/command/namespace_list.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/namespace_lookup.go b/command/namespace_lookup.go index ee18736633a54..98d710ea536e2 100644 --- a/command/namespace_lookup.go +++ b/command/namespace_lookup.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/namespace_patch.go b/command/namespace_patch.go index 2a4a6dc699a80..3ae6f6bc8b31b 100644 --- a/command/namespace_patch.go +++ b/command/namespace_patch.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator.go b/command/operator.go index a79f7bff84060..ad1bb439fc7d5 100644 --- a/command/operator.go +++ b/command/operator.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_diagnose.go b/command/operator_diagnose.go index 5abddb7c980d6..9518fdf2b8c8a 100644 --- a/command/operator_diagnose.go +++ b/command/operator_diagnose.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -30,16 +27,18 @@ import ( physconsul "github.com/hashicorp/vault/physical/consul" "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/version" sr "github.com/hashicorp/vault/serviceregistration" srconsul "github.com/hashicorp/vault/serviceregistration/consul" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/diagnose" "github.com/hashicorp/vault/vault/hcp_link" - "github.com/hashicorp/vault/version" "github.com/mitchellh/cli" "github.com/posener/complete" ) +const OperatorDiagnoseEnableEnv = "VAULT_DIAGNOSE" + const CoreConfigUninitializedErr = "Diagnose cannot attempt this step because core config could not be set." var ( @@ -447,25 +446,26 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error goto SEALFAIL } - for _, seal := range seals { - // There is always one nil seal. We need to skip it so we don't start an empty Finalize-Seal-Shamir - // section. 
- if seal == nil { - continue - } - seal := seal // capture range variable - // Ensure that the seal finalizer is called, even if using verify-only - defer func(seal *vault.Seal) { - sealType := diagnose.CapitalizeFirstLetter((*seal).BarrierType().String()) - finalizeSealContext, finalizeSealSpan := diagnose.StartSpan(ctx, "Finalize "+sealType+" Seal") - err = (*seal).Finalize(finalizeSealContext) - if err != nil { - diagnose.Fail(finalizeSealContext, "Error finalizing seal.") - diagnose.Advise(finalizeSealContext, "This likely means that the barrier is still in use; therefore, finalizing the seal timed out.") - finalizeSealSpan.End() + if seals != nil { + for _, seal := range seals { + // There is always one nil seal. We need to skip it so we don't start an empty Finalize-Seal-Shamir + // section. + if seal == nil { + continue } - finalizeSealSpan.End() - }(&seal) + // Ensure that the seal finalizer is called, even if using verify-only + defer func(seal *vault.Seal) { + sealType := diagnose.CapitalizeFirstLetter((*seal).BarrierType().String()) + finalizeSealContext, finalizeSealSpan := diagnose.StartSpan(ctx, "Finalize "+sealType+" Seal") + err = (*seal).Finalize(finalizeSealContext) + if err != nil { + diagnose.Fail(finalizeSealContext, "Error finalizing seal.") + diagnose.Advise(finalizeSealContext, "This likely means that the barrier is still in use; therefore, finalizing the seal timed out.") + finalizeSealSpan.End() + } + finalizeSealSpan.End() + }(&seal) + } } if barrierSeal == nil { diff --git a/command/operator_diagnose_test.go b/command/operator_diagnose_test.go index 2c9a1a0363e3c..99834b9d48bcd 100644 --- a/command/operator_diagnose_test.go +++ b/command/operator_diagnose_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - //go:build !race package command diff --git a/command/operator_generate_root.go b/command/operator_generate_root.go index 6665e8bdbf8ce..ece541683be1d 100644 --- a/command/operator_generate_root.go +++ b/command/operator_generate_root.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_generate_root_test.go b/command/operator_generate_root_test.go index 1436ab2ddfae5..b4489718efbea 100644 --- a/command/operator_generate_root_test.go +++ b/command/operator_generate_root_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !race package command diff --git a/command/operator_init.go b/command/operator_init.go index 080f5853def56..6d67dcd9b6a45 100644 --- a/command/operator_init.go +++ b/command/operator_init.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -240,15 +237,6 @@ func (c *OperatorInitCommand) Run(args []string) int { return 2 } - // -output-curl string returns curl command for seal status - // setting this to false and then setting actual value after reading seal status - currentOutputCurlString := client.OutputCurlString() - client.SetOutputCurlString(false) - // -output-policy string returns minimum required policy HCL for seal status - // setting this to false and then setting actual value after reading seal status - outputPolicy := client.OutputPolicy() - client.SetOutputPolicy(false) - // Set defaults based on use of auto unseal seal sealInfo, err := client.Sys().SealStatus() if err != nil { @@ -256,9 +244,6 @@ func (c *OperatorInitCommand) Run(args []string) int { return 2 } - client.SetOutputCurlString(currentOutputCurlString) - client.SetOutputPolicy(outputPolicy) - switch sealInfo.RecoverySeal { case true: if c.flagRecoveryShares == 0 { diff --git a/command/operator_init_test.go 
b/command/operator_init_test.go index 06647d7278248..ec02873587dfc 100644 --- a/command/operator_init_test.go +++ b/command/operator_init_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !race package command diff --git a/command/operator_key_status.go b/command/operator_key_status.go index 412a00cd20e6c..e015fb0e32f74 100644 --- a/command/operator_key_status.go +++ b/command/operator_key_status.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_key_status_test.go b/command/operator_key_status_test.go index 9f8fbb0f6dc49..01cb9136286b1 100644 --- a/command/operator_key_status_test.go +++ b/command/operator_key_status_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_members.go b/command/operator_members.go index 986313a3201b0..d4bd1fbe4389f 100644 --- a/command/operator_members.go +++ b/command/operator_members.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_migrate.go b/command/operator_migrate.go index 01e8bbb21322e..a974f58d6cb61 100644 --- a/command/operator_migrate.go +++ b/command/operator_migrate.go @@ -1,13 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( "context" "fmt" "io/ioutil" - "math" "net/url" "os" "sort" @@ -26,7 +22,6 @@ import ( "github.com/mitchellh/cli" "github.com/pkg/errors" "github.com/posener/complete" - "golang.org/x/sync/errgroup" ) var ( @@ -44,7 +39,6 @@ type OperatorMigrateCommand struct { flagLogLevel string flagStart string flagReset bool - flagMaxParallel int logger log.Logger ShutdownCh chan struct{} } @@ -104,14 +98,6 @@ func (c *OperatorMigrateCommand) Flags() *FlagSets { Usage: "Reset the migration lock. 
No migration will occur.", }) - f.IntVar(&IntVar{ - Name: "max-parallel", - Default: 10, - Target: &c.flagMaxParallel, - Usage: "Specifies the maximum number of parallel migration threads (goroutines) that may be used when migrating. " + - "This can speed up the migration process on slow backends but uses more resources.", - }) - f.StringVar(&StringVar{ Name: "log-level", Target: &c.flagLogLevel, @@ -140,6 +126,7 @@ func (c *OperatorMigrateCommand) Run(args []string) int { c.UI.Error(err.Error()) return 1 } + c.flagLogLevel = strings.ToLower(c.flagLogLevel) validLevels := []string{"trace", "debug", "info", "warn", "error"} if !strutil.StrListContains(validLevels, c.flagLogLevel) { @@ -148,11 +135,6 @@ func (c *OperatorMigrateCommand) Run(args []string) int { } c.logger = logging.NewVaultLogger(log.LevelFromString(c.flagLogLevel)) - if c.flagMaxParallel < 1 { - c.UI.Error(fmt.Sprintf("Argument to flag -max-parallel must be between 1 and %d", math.MaxInt)) - return 1 - } - if c.flagConfig == "" { c.UI.Error("Must specify exactly one config path using -config") return 1 @@ -182,7 +164,7 @@ func (c *OperatorMigrateCommand) Run(args []string) int { } // migrate attempts to instantiate the source and destinations backends, -// and then invoke the migration the root of the keyspace. +// and then invoke the migration the the root of the keyspace. func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error { from, err := c.newBackend(config.StorageSource.Type, config.StorageSource.Config) if err != nil { @@ -227,7 +209,7 @@ func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error { doneCh := make(chan error) go func() { - doneCh <- c.migrateAll(ctx, from, to, c.flagMaxParallel) + doneCh <- c.migrateAll(ctx, from, to) }() select { @@ -243,8 +225,8 @@ func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error { } // migrateAll copies all keys in lexicographic order. 
-func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend, maxParallel int) error { - return dfsScan(ctx, from, maxParallel, func(ctx context.Context, path string) error { +func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend) error { + return dfsScan(ctx, from, func(ctx context.Context, path string) error { if path < c.flagStart || path == storageMigrationLock || path == vault.CoreLockPath { return nil } @@ -383,20 +365,10 @@ func parseStorage(result *migratorConfig, list *ast.ObjectList, name string) err // dfsScan will invoke cb with every key from source. // Keys will be traversed in lexicographic, depth-first order. -func dfsScan(ctx context.Context, source physical.Backend, maxParallel int, cb func(ctx context.Context, path string) error) error { +func dfsScan(ctx context.Context, source physical.Backend, cb func(ctx context.Context, path string) error) error { dfs := []string{""} - eg, ctx := errgroup.WithContext(ctx) - eg.SetLimit(maxParallel) - for l := len(dfs); l > 0; l = len(dfs) { - // Check for cancellation - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - key := dfs[len(dfs)-1] if key == "" || strings.HasSuffix(key, "/") { children, err := source.List(ctx, key) @@ -413,14 +385,19 @@ func dfsScan(ctx context.Context, source physical.Backend, maxParallel int, cb f } } } else { - // Pooling - eg.Go(func() error { - return cb(ctx, key) - }) + err := cb(ctx, key) + if err != nil { + return err + } dfs = dfs[:len(dfs)-1] } - } - return eg.Wait() + select { + case <-ctx.Done(): + return nil + default: + } + } + return nil } diff --git a/command/operator_migrate_test.go b/command/operator_migrate_test.go index 3d28430914846..5db53ebbfcb97 100644 --- a/command/operator_migrate_test.go +++ b/command/operator_migrate_test.go @@ -1,19 +1,16 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( "bytes" "context" "fmt" + "io/ioutil" "math/rand" "os" "path/filepath" "reflect" "sort" "strings" - "sync" "testing" "time" @@ -21,6 +18,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/base62" "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/testhelpers" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault" ) @@ -37,45 +35,8 @@ func TestMigration(t *testing.T) { fromFactory := physicalBackends["file"] - folder := t.TempDir() - - confFrom := map[string]string{ - "path": folder, - } - - from, err := fromFactory(confFrom, nil) - if err != nil { - t.Fatal(err) - } - if err := storeData(from, data); err != nil { - t.Fatal(err) - } - - toFactory := physicalBackends["inmem"] - confTo := map[string]string{} - to, err := toFactory(confTo, nil) - if err != nil { - t.Fatal(err) - } - cmd := OperatorMigrateCommand{ - logger: log.NewNullLogger(), - } - if err := cmd.migrateAll(context.Background(), from, to, 1); err != nil { - t.Fatal(err) - } - - if err := compareStoredData(to, data, ""); err != nil { - t.Fatal(err) - } - }) - - t.Run("Concurrent migration", func(t *testing.T) { - data := generateData() - - fromFactory := physicalBackends["file"] - - folder := t.TempDir() - + folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator")) + defer os.RemoveAll(folder) confFrom := map[string]string{ "path": folder, } @@ -98,10 +59,10 @@ func TestMigration(t *testing.T) { cmd := OperatorMigrateCommand{ logger: log.NewNullLogger(), } - - if err := cmd.migrateAll(context.Background(), from, to, 10); err != nil { + if err := cmd.migrateAll(context.Background(), from, to); err != nil { t.Fatal(err) } + if err := compareStoredData(to, data, ""); err != nil { t.Fatal(err) } @@ -121,46 +82,8 @@ func TestMigration(t *testing.T) { } toFactory := physicalBackends["file"] - folder := t.TempDir() - confTo := 
map[string]string{ - "path": folder, - } - - to, err := toFactory(confTo, nil) - if err != nil { - t.Fatal(err) - } - - const start = "m" - - cmd := OperatorMigrateCommand{ - logger: log.NewNullLogger(), - flagStart: start, - } - if err := cmd.migrateAll(context.Background(), from, to, 1); err != nil { - t.Fatal(err) - } - - if err := compareStoredData(to, data, start); err != nil { - t.Fatal(err) - } - }) - - t.Run("Start option (parallel)", func(t *testing.T) { - data := generateData() - - fromFactory := physicalBackends["inmem"] - confFrom := map[string]string{} - from, err := fromFactory(confFrom, nil) - if err != nil { - t.Fatal(err) - } - if err := storeData(from, data); err != nil { - t.Fatal(err) - } - - toFactory := physicalBackends["file"] - folder := t.TempDir() + folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator")) + defer os.RemoveAll(folder) confTo := map[string]string{ "path": folder, } @@ -176,7 +99,7 @@ func TestMigration(t *testing.T) { logger: log.NewNullLogger(), flagStart: start, } - if err := cmd.migrateAll(context.Background(), from, to, 10); err != nil { + if err := cmd.migrateAll(context.Background(), from, to); err != nil { t.Fatal(err) } @@ -187,8 +110,9 @@ func TestMigration(t *testing.T) { t.Run("Config parsing", func(t *testing.T) { cmd := new(OperatorMigrateCommand) - cfgName := filepath.Join(t.TempDir(), "migrator") - os.WriteFile(cfgName, []byte(` + + cfgName := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator")) + ioutil.WriteFile(cfgName, []byte(` storage_source "src_type" { path = "src_path" } @@ -196,6 +120,7 @@ storage_source "src_type" { storage_destination "dest_type" { path = "dest_path" }`), 0o644) + defer os.Remove(cfgName) expCfg := &migratorConfig{ StorageSource: &server.Storage{ @@ -220,7 +145,7 @@ storage_destination "dest_type" { } verifyBad := func(cfg string) { - os.WriteFile(cfgName, []byte(cfg), 0o644) + ioutil.WriteFile(cfgName, []byte(cfg), 0o644) _, err := 
cmd.loadMigratorConfig(cfgName) if err == nil { t.Fatalf("expected error but none received from: %v", cfg) @@ -267,7 +192,6 @@ storage_destination "dest_type2" { path = "dest_path" }`) }) - t.Run("DFS Scan", func(t *testing.T) { s, _ := physicalBackends["inmem"](map[string]string{}, nil) @@ -280,16 +204,9 @@ storage_destination "dest_type2" { l := randomLister{s} - type SafeAppend struct { - out []string - lock sync.Mutex - } - outKeys := SafeAppend{} - dfsScan(context.Background(), l, 10, func(ctx context.Context, path string) error { - outKeys.lock.Lock() - defer outKeys.lock.Unlock() - - outKeys.out = append(outKeys.out, path) + var out []string + dfsScan(context.Background(), l, func(ctx context.Context, path string) error { + out = append(out, path) return nil }) @@ -301,11 +218,8 @@ storage_destination "dest_type2" { keys = append(keys, key) } sort.Strings(keys) - outKeys.lock.Lock() - sort.Strings(outKeys.out) - outKeys.lock.Unlock() - if !reflect.DeepEqual(keys, outKeys.out) { - t.Fatalf("expected equal: %v, %v", keys, outKeys.out) + if !reflect.DeepEqual(keys, out) { + t.Fatalf("expected equal: %v, %v", keys, out) } }) } diff --git a/command/operator_raft.go b/command/operator_raft.go index 8720b78ce7cb7..34107dbb59535 100644 --- a/command/operator_raft.go +++ b/command/operator_raft.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_raft_autopilot_get_config.go b/command/operator_raft_autopilot_get_config.go index 11d7da87d8950..1462e354c5590 100644 --- a/command/operator_raft_autopilot_get_config.go +++ b/command/operator_raft_autopilot_get_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_raft_autopilot_set_config.go b/command/operator_raft_autopilot_set_config.go index 0d9a5f3362951..4a839c5fae3ab 100644 --- a/command/operator_raft_autopilot_set_config.go +++ b/command/operator_raft_autopilot_set_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_raft_autopilot_state.go b/command/operator_raft_autopilot_state.go index 99f188aae78f0..8a530dc75e0f9 100644 --- a/command/operator_raft_autopilot_state.go +++ b/command/operator_raft_autopilot_state.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_raft_join.go b/command/operator_raft_join.go index 57e14a827edcb..466ab84142b41 100644 --- a/command/operator_raft_join.go +++ b/command/operator_raft_join.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_raft_listpeers.go b/command/operator_raft_listpeers.go index 4e82c15da98ed..2c80112ec3faa 100644 --- a/command/operator_raft_listpeers.go +++ b/command/operator_raft_listpeers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_raft_remove_peer.go b/command/operator_raft_remove_peer.go index 84b516cf62947..6f7e837474fac 100644 --- a/command/operator_raft_remove_peer.go +++ b/command/operator_raft_remove_peer.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_raft_snapshot.go b/command/operator_raft_snapshot.go index 036c6ebae09f6..5e3b042877363 100644 --- a/command/operator_raft_snapshot.go +++ b/command/operator_raft_snapshot.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_raft_snapshot_restore.go b/command/operator_raft_snapshot_restore.go index 6067adca79049..3755d6cbfd0d9 100644 --- a/command/operator_raft_snapshot_restore.go +++ b/command/operator_raft_snapshot_restore.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_raft_snapshot_save.go b/command/operator_raft_snapshot_save.go index 2abbb0ad2234e..496b0a7b52c48 100644 --- a/command/operator_raft_snapshot_save.go +++ b/command/operator_raft_snapshot_save.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_rekey.go b/command/operator_rekey.go index dde0e5800528a..b73349405daaf 100644 --- a/command/operator_rekey.go +++ b/command/operator_rekey.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -23,11 +20,6 @@ var ( _ cli.CommandAutocomplete = (*OperatorRekeyCommand)(nil) ) -const ( - keyTypeRecovery = "Recovery" - keyTypeUnseal = "Unseal" -) - type OperatorRekeyCommand struct { *BaseCommand @@ -66,9 +58,6 @@ Usage: vault operator rekey [options] [KEY] the command. If key is specified as "-", the command will read from stdin. If a TTY is available, the command will prompt for text. - If the flag -target=recovery is supplied, then this operation will require a - quorum of recovery keys in order to generate a new set of recovery keys. - Initialize a rekey: $ vault operator rekey \ @@ -123,7 +112,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets { Target: &c.flagCancel, Default: false, Usage: "Reset the rekeying progress. 
This will discard any submitted " + - "unseal keys, recovery keys, or configuration.", + "unseal keys or configuration.", }) f.BoolVar(&BoolVar{ @@ -131,7 +120,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets { Target: &c.flagStatus, Default: false, Usage: "Print the status of the current attempt without providing an " + - "unseal or recovery key.", + "unseal key.", }) f.IntVar(&IntVar{ @@ -141,7 +130,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets { Default: 5, Completion: complete.PredictAnything, Usage: "Number of key shares to split the generated root key into. " + - "This is the number of \"unseal keys\" or \"recovery keys\" to generate.", + "This is the number of \"unseal keys\" to generate.", }) f.IntVar(&IntVar{ @@ -161,7 +150,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets { EnvVar: "", Completion: complete.PredictAnything, Usage: "Nonce value provided at initialization. The same nonce value " + - "must be provided with each unseal or recovery key.", + "must be provided with each unseal key.", }) f.StringVar(&StringVar{ @@ -190,7 +179,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets { Usage: "Comma-separated list of paths to files on disk containing " + "public PGP keys OR a comma-separated list of Keybase usernames using " + "the format \"keybase:\". When supplied, the generated " + - "unseal or recovery keys will be encrypted and base64-encoded in the order " + + "unseal keys will be encrypted and base64-encoded in the order " + "specified in this list.", }) @@ -200,25 +189,25 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets { Name: "backup", Target: &c.flagBackup, Default: false, - Usage: "Store a backup of the current PGP encrypted unseal or recovery keys in " + + Usage: "Store a backup of the current PGP encrypted unseal keys in " + "Vault's core. The encrypted values can be recovered in the event of " + "failure or discarded after success. See the -backup-delete and " + "-backup-retrieve options for more information. 
This option only " + - "applies when the existing unseal or recovery keys were PGP encrypted.", + "applies when the existing unseal keys were PGP encrypted.", }) f.BoolVar(&BoolVar{ Name: "backup-delete", Target: &c.flagBackupDelete, Default: false, - Usage: "Delete any stored backup unseal or recovery keys.", + Usage: "Delete any stored backup unseal keys.", }) f.BoolVar(&BoolVar{ Name: "backup-retrieve", Target: &c.flagBackupRetrieve, Default: false, - Usage: "Retrieve the backed-up unseal or recovery keys. This option is only available " + + Usage: "Retrieve the backed-up unseal keys. This option is only available " + "if the PGP keys were provided and the backup has not been deleted.", }) @@ -279,12 +268,10 @@ func (c *OperatorRekeyCommand) Run(args []string) int { func (c *OperatorRekeyCommand) init(client *api.Client) int { // Handle the different API requests var fn func(*api.RekeyInitRequest) (*api.RekeyStatusResponse, error) - keyTypeRequired := keyTypeUnseal switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { case "barrier": fn = client.Sys().RekeyInit case "recovery", "hsm": - keyTypeRequired = keyTypeRecovery fn = client.Sys().RekeyRecoveryKeyInit default: c.UI.Error(fmt.Sprintf("Unknown target: %s", c.flagTarget)) @@ -308,25 +295,25 @@ func (c *OperatorRekeyCommand) init(client *api.Client) int { if len(c.flagPGPKeys) == 0 { if Format(c.UI) == "table" { c.UI.Warn(wrapAtLength( - fmt.Sprintf("WARNING! If you lose the keys after they are returned, there is no "+ - "recovery. Consider canceling this operation and re-initializing "+ - "with the -pgp-keys flag to protect the returned %s keys along "+ - "with -backup to allow recovery of the encrypted keys in case of "+ - "emergency. You can delete the stored keys later using the -delete "+ - "flag.", strings.ToLower(keyTypeRequired)))) + "WARNING! If you lose the keys after they are returned, there is no " + + "recovery. 
Consider canceling this operation and re-initializing " + + "with the -pgp-keys flag to protect the returned unseal keys along " + + "with -backup to allow recovery of the encrypted keys in case of " + + "emergency. You can delete the stored keys later using the -delete " + + "flag.")) c.UI.Output("") } } if len(c.flagPGPKeys) > 0 && !c.flagBackup { if Format(c.UI) == "table" { c.UI.Warn(wrapAtLength( - fmt.Sprintf("WARNING! You are using PGP keys for encrypted the resulting %s "+ - "keys, but you did not enable the option to backup the keys to "+ - "Vault's core. If you lose the encrypted keys after they are "+ - "returned, you will not be able to recover them. Consider canceling "+ - "this operation and re-running with -backup to allow recovery of the "+ - "encrypted unseal keys in case of emergency. You can delete the "+ - "stored keys later using the -delete flag.", strings.ToLower(keyTypeRequired)))) + "WARNING! You are using PGP keys for encrypted the resulting unseal " + + "keys, but you did not enable the option to backup the keys to " + + "Vault's core. If you lose the encrypted keys after they are " + + "returned, you will not be able to recover them. Consider canceling " + + "this operation and re-running with -backup to allow recovery of the " + + "encrypted unseal keys in case of emergency. 
You can delete the " + + "stored keys later using the -delete flag.")) c.UI.Output("") } } @@ -371,7 +358,7 @@ func (c *OperatorRekeyCommand) cancel(client *api.Client) int { func (c *OperatorRekeyCommand) provide(client *api.Client, key string) int { var statusFn func() (interface{}, error) var updateFn func(string, string) (interface{}, error) - keyTypeRequired := keyTypeUnseal + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { case "barrier": statusFn = func() (interface{}, error) { @@ -389,7 +376,6 @@ func (c *OperatorRekeyCommand) provide(client *api.Client, key string) int { } } case "recovery", "hsm": - keyTypeRequired = keyTypeRecovery statusFn = func() (interface{}, error) { return client.Sys().RekeyRecoveryKeyStatus() } @@ -462,7 +448,7 @@ func (c *OperatorRekeyCommand) provide(client *api.Client, key string) int { // Nonce value is not required if we are prompting via the terminal w := getWriterFromUI(c.UI) fmt.Fprintf(w, "Rekey operation nonce: %s\n", nonce) - fmt.Fprintf(w, "%s Key (will be hidden): ", keyTypeRequired) + fmt.Fprintf(w, "Unseal Key (will be hidden): ") key, err = password.Read(os.Stdin) fmt.Fprintf(w, "\n") if err != nil { @@ -472,11 +458,11 @@ func (c *OperatorRekeyCommand) provide(client *api.Client, key string) int { } c.UI.Error(wrapAtLength(fmt.Sprintf("An error occurred attempting to "+ - "ask for the %s key. The raw error message is shown below, but "+ + "ask for the unseal key. The raw error message is shown below, but "+ "usually this is because you attempted to pipe a value into the "+ "command or you are executing outside of a terminal (tty). If you "+ "want to pipe the value, pass \"-\" as the argument to read from "+ - "stdin. The raw error was: %s", strings.ToLower(keyTypeRequired), err))) + "stdin. 
The raw error was: %s", err))) return 1 } default: // Supplied directly as an arg @@ -711,7 +697,7 @@ func (c *OperatorRekeyCommand) printUnsealKeys(client *api.Client, status *api.R ))) case "recovery", "hsm": c.UI.Output(wrapAtLength(fmt.Sprintf( - "The encrypted recovery keys are backed up to \"core/recovery-keys-backup\" " + + "The encrypted unseal keys are backed up to \"core/recovery-keys-backup\" " + "in the storage backend. Remove these keys at any time using " + "\"vault operator rekey -backup-delete -target=recovery\". Vault does not automatically " + "remove these keys.", @@ -722,56 +708,33 @@ func (c *OperatorRekeyCommand) printUnsealKeys(client *api.Client, status *api.R switch status.VerificationRequired { case false: c.UI.Output("") - switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { - case "barrier": - c.UI.Output(wrapAtLength(fmt.Sprintf( - "Vault unseal keys rekeyed with %d key shares and a key threshold of %d. Please "+ - "securely distribute the key shares printed above. When Vault is "+ - "re-sealed, restarted, or stopped, you must supply at least %d of "+ - "these keys to unseal it before it can start servicing requests.", - status.N, - status.T, - status.T))) - case "recovery", "hsm": - c.UI.Output(wrapAtLength(fmt.Sprintf( - "Vault recovery keys rekeyed with %d key shares and a key threshold of %d. Please "+ - "securely distribute the key shares printed above.", - status.N, - status.T))) - } - + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault rekeyed with %d key shares and a key threshold of %d. Please "+ + "securely distribute the key shares printed above. 
When Vault is "+ + "re-sealed, restarted, or stopped, you must supply at least %d of "+ + "these keys to unseal it before it can start servicing requests.", + status.N, + status.T, + status.T))) default: c.UI.Output("") - var warningText string - switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { - case "barrier": - c.UI.Output(wrapAtLength(fmt.Sprintf( - "Vault has created a new unseal key, split into %d key shares and a key threshold "+ - "of %d. These will not be active until after verification is complete. "+ - "Please securely distribute the key shares printed above. When Vault "+ - "is re-sealed, restarted, or stopped, you must supply at least %d of "+ - "these keys to unseal it before it can start servicing requests.", - status.N, - status.T, - status.T))) - warningText = "unseal" - case "recovery", "hsm": - c.UI.Output(wrapAtLength(fmt.Sprintf( - "Vault has created a new recovery key, split into %d key shares and a key threshold "+ - "of %d. These will not be active until after verification is complete. "+ - "Please securely distribute the key shares printed above.", - status.N, - status.T))) - warningText = "authenticate with" - - } + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault has created a new key, split into %d key shares and a key threshold "+ + "of %d. These will not be active until after verification is complete. "+ + "Please securely distribute the key shares printed above. When Vault "+ + "is re-sealed, restarted, or stopped, you must supply at least %d of "+ + "these keys to unseal it before it can start servicing requests.", + status.N, + status.T, + status.T))) c.UI.Output("") - c.UI.Warn(wrapAtLength(fmt.Sprintf( - "Again, these key shares are _not_ valid until verification is performed. "+ - "Do not lose or discard your current key shares until after verification "+ - "is complete or you will be unable to %s Vault. 
If you cancel the "+ - "rekey process or seal Vault before verification is complete the new "+ - "shares will be discarded and the current shares will remain valid.", warningText))) + c.UI.Warn(wrapAtLength( + "Again, these key shares are _not_ valid until verification is performed. " + + "Do not lose or discard your current key shares until after verification " + + "is complete or you will be unable to unseal Vault. If you cancel the " + + "rekey process or seal Vault before verification is complete the new " + + "shares will be discarded and the current shares will remain valid.", + )) c.UI.Output("") c.UI.Warn(wrapAtLength( "The current verification status, including initial nonce, is shown below.", diff --git a/command/operator_rekey_test.go b/command/operator_rekey_test.go index 570cfe447e69e..31617e5ac4bc7 100644 --- a/command/operator_rekey_test.go +++ b/command/operator_rekey_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !race package command @@ -12,8 +9,6 @@ import ( "strings" "testing" - "github.com/hashicorp/vault/sdk/helper/roottoken" - "github.com/hashicorp/vault/api" "github.com/mitchellh/cli" ) @@ -259,83 +254,6 @@ func TestOperatorRekeyCommand_Run(t *testing.T) { } }) - t.Run("provide_arg_recovery_keys", func(t *testing.T) { - t.Parallel() - - client, keys, closer := testVaultServerAutoUnseal(t) - defer closer() - - // Initialize a rekey - status, err := client.Sys().RekeyRecoveryKeyInit(&api.RekeyInitRequest{ - SecretShares: 1, - SecretThreshold: 1, - }) - if err != nil { - t.Fatal(err) - } - nonce := status.Nonce - - // Supply the first n-1 recovery keys - for _, key := range keys[:len(keys)-1] { - ui, cmd := testOperatorRekeyCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "-nonce", nonce, - "-target", "recovery", - key, - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) - } - } - - ui, cmd := 
testOperatorRekeyCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "-nonce", nonce, - "-target", "recovery", - keys[len(keys)-1], // the last recovery key - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) - } - - re := regexp.MustCompile(`Key 1: (.+)`) - output := ui.OutputWriter.String() - match := re.FindAllStringSubmatch(output, -1) - if len(match) < 1 || len(match[0]) < 2 { - t.Fatalf("bad match: %#v", match) - } - recoveryKey := match[0][1] - - if strings.Contains(strings.ToLower(output), "unseal key") { - t.Fatalf(`output %s shouldn't contain "unseal key"`, output) - } - - // verify that we can perform operations with the recovery key - // below we generate a root token using the recovery key - rootStatus, err := client.Sys().GenerateRootStatus() - if err != nil { - t.Fatal(err) - } - otp, err := roottoken.GenerateOTP(rootStatus.OTPLength) - if err != nil { - t.Fatal(err) - } - genRoot, err := client.Sys().GenerateRootInit(otp, "") - if err != nil { - t.Fatal(err) - } - r, err := client.Sys().GenerateRootUpdate(recoveryKey, genRoot.Nonce) - if err != nil { - t.Fatal(err) - } - if !r.Complete { - t.Fatal("expected root update to be complete") - } - }) t.Run("provide_arg", func(t *testing.T) { t.Parallel() @@ -474,94 +392,6 @@ func TestOperatorRekeyCommand_Run(t *testing.T) { } }) - t.Run("provide_stdin_recovery_keys", func(t *testing.T) { - t.Parallel() - - client, keys, closer := testVaultServerAutoUnseal(t) - defer closer() - - // Initialize a rekey - status, err := client.Sys().RekeyRecoveryKeyInit(&api.RekeyInitRequest{ - SecretShares: 1, - SecretThreshold: 1, - }) - if err != nil { - t.Fatal(err) - } - nonce := status.Nonce - for _, key := range keys[:len(keys)-1] { - stdinR, stdinW := io.Pipe() - go func() { - _, _ = stdinW.Write([]byte(key)) - _ = stdinW.Close() - }() - - ui, cmd := testOperatorRekeyCommand(t) - cmd.client = client - cmd.testStdin = stdinR - - code := 
cmd.Run([]string{ - "-target", "recovery", - "-nonce", nonce, - "-", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) - } - } - - stdinR, stdinW := io.Pipe() - go func() { - _, _ = stdinW.Write([]byte(keys[len(keys)-1])) // the last recovery key - _ = stdinW.Close() - }() - - ui, cmd := testOperatorRekeyCommand(t) - cmd.client = client - cmd.testStdin = stdinR - - code := cmd.Run([]string{ - "-nonce", nonce, - "-target", "recovery", - "-", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) - } - - re := regexp.MustCompile(`Key 1: (.+)`) - output := ui.OutputWriter.String() - match := re.FindAllStringSubmatch(output, -1) - if len(match) < 1 || len(match[0]) < 2 { - t.Fatalf("bad match: %#v", match) - } - recoveryKey := match[0][1] - - if strings.Contains(strings.ToLower(output), "unseal key") { - t.Fatalf(`output %s shouldn't contain "unseal key"`, output) - } - // verify that we can perform operations with the recovery key - // below we generate a root token using the recovery key - rootStatus, err := client.Sys().GenerateRootStatus() - if err != nil { - t.Fatal(err) - } - otp, err := roottoken.GenerateOTP(rootStatus.OTPLength) - if err != nil { - t.Fatal(err) - } - genRoot, err := client.Sys().GenerateRootInit(otp, "") - if err != nil { - t.Fatal(err) - } - r, err := client.Sys().GenerateRootUpdate(recoveryKey, genRoot.Nonce) - if err != nil { - t.Fatal(err) - } - if !r.Complete { - t.Fatal("expected root update to be complete") - } - }) t.Run("backup", func(t *testing.T) { t.Parallel() diff --git a/command/operator_seal.go b/command/operator_seal.go index f52665fd08b10..369ec3215d66f 100644 --- a/command/operator_seal.go +++ b/command/operator_seal.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_seal_test.go b/command/operator_seal_test.go index 43e150f7e37fc..86722d2e84dd7 100644 --- a/command/operator_seal_test.go +++ b/command/operator_seal_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_step_down.go b/command/operator_step_down.go index bfa2d893f18fb..dea2c97178da8 100644 --- a/command/operator_step_down.go +++ b/command/operator_step_down.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_step_down_test.go b/command/operator_step_down_test.go index fbe07794d2971..93117a856b665 100644 --- a/command/operator_step_down_test.go +++ b/command/operator_step_down_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_unseal.go b/command/operator_unseal.go index 32d9140900a51..8cdd06d384087 100644 --- a/command/operator_unseal.go +++ b/command/operator_unseal.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_unseal_test.go b/command/operator_unseal_test.go index cb4d19603ba4f..867b17a03c01e 100644 --- a/command/operator_unseal_test.go +++ b/command/operator_unseal_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/operator_usage.go b/command/operator_usage.go index 8db538a298c54..1df42091ccc31 100644 --- a/command/operator_usage.go +++ b/command/operator_usage.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/patch.go b/command/patch.go deleted file mode 100644 index 9a4cd58862875..0000000000000 --- a/command/patch.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "context" - "fmt" - "io" - "os" - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*PatchCommand)(nil) - _ cli.CommandAutocomplete = (*PatchCommand)(nil) -) - -// PatchCommand is a Command that puts data into the Vault. -type PatchCommand struct { - *BaseCommand - - flagForce bool - - testStdin io.Reader // for tests -} - -func (c *PatchCommand) Synopsis() string { - return "Patch data, configuration, and secrets" -} - -func (c *PatchCommand) Help() string { - helpText := ` -Usage: vault patch [options] PATH [DATA K=V...] - - Patches data in Vault at the given path. The data can be credentials, secrets, - configuration, or arbitrary data. The specific behavior of this command is - determined at the thing mounted at the path. - - Data is specified as "key=value" pairs. If the value begins with an "@", then - it is loaded from a file. If the value is "-", Vault will read the value from - stdin. - - Unlike write, patch will only modify specified fields. - - Persist data in the generic secrets engine without modifying any other fields: - - $ vault patch pki/roles/example allow_localhost=false - - The data can also be consumed from a file on disk by prefixing with the "@" - symbol. For example: - - $ vault patch pki/roles/example @role.json - - Or it can be read from stdin using the "-" symbol: - - $ echo "example.com" | vault patch pki/roles/example allowed_domains=- - - For a full list of examples and paths, please see the documentation that - corresponds to the secret engines in use. 
- -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *PatchCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) - f := set.NewFlagSet("Command Options") - - f.BoolVar(&BoolVar{ - Name: "force", - Aliases: []string{"f"}, - Target: &c.flagForce, - Default: false, - EnvVar: "", - Completion: complete.PredictNothing, - Usage: "Allow the operation to continue with no key=value pairs. This " + - "allows writing to keys that do not need or expect data.", - }) - - return set -} - -func (c *PatchCommand) AutocompleteArgs() complete.Predictor { - // Return an anything predictor here. Without a way to access help - // information, we don't know what paths we could patch. - return complete.PredictAnything -} - -func (c *PatchCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *PatchCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) == 1 && !c.flagForce: - c.UI.Error("Must supply data or use -force") - return 1 - } - - // Pull our fake stdin if needed - stdin := (io.Reader)(os.Stdin) - if c.testStdin != nil { - stdin = c.testStdin - } - - path := sanitizePath(args[0]) - - data, err := parseArgsData(stdin, args[1:]) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - secret, err := client.Logical().JSONMergePatch(context.Background(), path, data) - return handleWriteSecretOutput(c.BaseCommand, path, secret, err) -} diff --git a/command/patch_test.go b/command/patch_test.go deleted file mode 100644 index 410e644656184..0000000000000 --- a/command/patch_test.go +++ /dev/null @@ -1,205 +0,0 
@@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "io" - "strings" - "testing" - - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" -) - -func testPatchCommand(tb testing.TB) (*cli.MockUi, *PatchCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &PatchCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } -} - -func TestPatchCommand_Run(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - args []string - out string - code int - }{ - { - "not_enough_args", - []string{}, - "Not enough arguments", - 1, - }, - { - "empty_kvs", - []string{"secret/write/foo"}, - "Must supply data or use -force", - 1, - }, - { - "force_kvs", - []string{"-force", "pki/roles/example"}, - "allow_localhost", - 0, - }, - { - "force_f_kvs", - []string{"-f", "pki/roles/example"}, - "allow_localhost", - 0, - }, - { - "kvs_no_value", - []string{"pki/roles/example", "foo"}, - "Failed to parse K=V data", - 1, - }, - { - "single_value", - []string{"pki/roles/example", "allow_localhost=true"}, - "allow_localhost", - 0, - }, - { - "multi_value", - []string{"pki/roles/example", "allow_localhost=true", "allowed_domains=true"}, - "allow_localhost", - 0, - }, - } - - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if _, err := client.Logical().Write("pki/roles/example", nil); err != nil { - t.Fatalf("failed to prime role: %v", err) - } - - if _, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X1", - }); err != nil { - t.Fatalf("failed to prime CA: %v", err) - } - - ui, cmd := testPatchCommand(t) - cmd.client = client - - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected 
%d to be %d", code, tc.code) - } - - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) - } - - t.Run("stdin_full", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if _, err := client.Logical().Write("pki/roles/example", nil); err != nil { - t.Fatalf("failed to prime role: %v", err) - } - - if _, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X1", - }); err != nil { - t.Fatalf("failed to prime CA: %v", err) - } - - stdinR, stdinW := io.Pipe() - go func() { - stdinW.Write([]byte(`{"allow_localhost":"false","allow_wildcard_certificates":"false"}`)) - stdinW.Close() - }() - - ui, cmd := testPatchCommand(t) - cmd.client = client - cmd.testStdin = stdinR - - code := cmd.Run([]string{ - "pki/roles/example", "-", - }) - if code != 0 { - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - t.Fatalf("expected retcode=%d to be 0\nOutput:\n%v", code, combined) - } - - secret, err := client.Logical().Read("pki/roles/example") - if err != nil { - t.Fatal(err) - } - if secret == nil || secret.Data == nil { - t.Fatal("expected secret to have data") - } - if exp, act := false, secret.Data["allow_localhost"].(bool); exp != act { - t.Errorf("expected allowed_localhost=%v to be %v", act, exp) - } - if exp, act := false, secret.Data["allow_wildcard_certificates"].(bool); exp != act { - t.Errorf("expected allow_wildcard_certificates=%v to be %v", act, exp) - } - }) - - t.Run("communication_failure", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServerBad(t) - defer closer() - - ui, cmd := testPatchCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "foo/bar", 
"a=b", - }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Error writing data to foo/bar: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - }) - - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() - - _, cmd := testPatchCommand(t) - assertNoTabs(t, cmd) - }) -} diff --git a/command/path_help.go b/command/path_help.go index 41f3bcee66a6e..1f540a5c6ab00 100644 --- a/command/path_help.go +++ b/command/path_help.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/path_help_test.go b/command/path_help_test.go index eaf4fe8a9f239..688bcf09cef3c 100644 --- a/command/path_help_test.go +++ b/command/path_help_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/path_map_upgrade_api_test.go b/command/path_map_upgrade_api_test.go new file mode 100644 index 0000000000000..b9e573b2797ea --- /dev/null +++ b/command/path_map_upgrade_api_test.go @@ -0,0 +1,92 @@ +package command + +import ( + "testing" + + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + + credAppId "github.com/hashicorp/vault/builtin/credential/app-id" +) + +func TestPathMap_Upgrade_API(t *testing.T) { + var err error + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + CredentialBackends: map[string]logical.Factory{ + "app-id": credAppId.Factory, + }, + PendingRemovalMountsAllowed: true, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + 
client := cores[0].Client + + // Enable the app-id method + err = client.Sys().EnableAuthWithOptions("app-id", &api.EnableAuthOptions{ + Type: "app-id", + }) + if err != nil { + t.Fatal(err) + } + + // Create an app-id + _, err = client.Logical().Write("auth/app-id/map/app-id/test-app-id", map[string]interface{}{ + "policy": "test-policy", + }) + if err != nil { + t.Fatal(err) + } + + // Create a user-id + _, err = client.Logical().Write("auth/app-id/map/user-id/test-user-id", map[string]interface{}{ + "value": "test-app-id", + }) + if err != nil { + t.Fatal(err) + } + + // Perform a login. It should succeed. + _, err = client.Logical().Write("auth/app-id/login", map[string]interface{}{ + "app_id": "test-app-id", + "user_id": "test-user-id", + }) + if err != nil { + t.Fatal(err) + } + + // List the hashed app-ids in the storage + secret, err := client.Logical().List("auth/app-id/map/app-id") + if err != nil { + t.Fatal(err) + } + hashedAppID := secret.Data["keys"].([]interface{})[0].(string) + + // Try reading it. This used to cause an issue which is fixed in [GH-3806]. + _, err = client.Logical().Read("auth/app-id/map/app-id/" + hashedAppID) + if err != nil { + t.Fatal(err) + } + + // Ensure that there was no issue by performing another login + _, err = client.Logical().Write("auth/app-id/login", map[string]interface{}{ + "app_id": "test-app-id", + "user_id": "test-user-id", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/command/pgp_test.go b/command/pgp_test.go index f37e488ed6865..8d0b5d6a0474a 100644 --- a/command/pgp_test.go +++ b/command/pgp_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -16,8 +13,8 @@ import ( "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/vault" - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" ) func getPubKeyFiles(t *testing.T) (string, []string, error) { diff --git a/command/pki.go b/command/pki.go deleted file mode 100644 index 89770fa448516..0000000000000 --- a/command/pki.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "strings" - - "github.com/mitchellh/cli" -) - -var _ cli.Command = (*PKICommand)(nil) - -type PKICommand struct { - *BaseCommand -} - -func (c *PKICommand) Synopsis() string { - return "Interact with Vault's PKI Secrets Engine" -} - -func (c *PKICommand) Help() string { - helpText := ` -Usage: vault pki [options] [args] - - This command has subcommands for interacting with Vault's PKI Secrets - Engine. Here are some simple examples, and more detailed examples are - available in the subcommands or the documentation. - - Check the health of a PKI mount, to the best of this token's abilities: - - $ vault pki health-check pki - - Please see the individual subcommand help for detailed usage information. -` - - return strings.TrimSpace(helpText) -} - -func (c *PKICommand) Run(args []string) int { - return cli.RunResultHelp -} diff --git a/command/pki_health_check.go b/command/pki_health_check.go deleted file mode 100644 index 188c95c99e93a..0000000000000 --- a/command/pki_health_check.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "encoding/json" - "fmt" - "os" - "strings" - - "github.com/hashicorp/vault/command/healthcheck" - - "github.com/ghodss/yaml" - "github.com/mitchellh/cli" - "github.com/posener/complete" - "github.com/ryanuber/columnize" -) - -const ( - pkiRetOK int = iota - pkiRetUsage - pkiRetInformational - pkiRetWarning - pkiRetCritical - pkiRetInvalidVersion - pkiRetInsufficientPermissions -) - -var ( - _ cli.Command = (*PKIHealthCheckCommand)(nil) - _ cli.CommandAutocomplete = (*PKIHealthCheckCommand)(nil) - - // Ensure the above return codes match (outside of OK/Usage) the values in - // the healthcheck package. - _ = pkiRetInformational == int(healthcheck.ResultInformational) - _ = pkiRetWarning == int(healthcheck.ResultWarning) - _ = pkiRetCritical == int(healthcheck.ResultCritical) - _ = pkiRetInvalidVersion == int(healthcheck.ResultInvalidVersion) - _ = pkiRetInsufficientPermissions == int(healthcheck.ResultInsufficientPermissions) -) - -type PKIHealthCheckCommand struct { - *BaseCommand - - flagConfig string - flagReturnIndicator string - flagDefaultDisabled bool - flagList bool -} - -func (c *PKIHealthCheckCommand) Synopsis() string { - return "Check a PKI Secrets Engine mount's health and operational status" -} - -func (c *PKIHealthCheckCommand) Help() string { - helpText := ` -Usage: vault pki health-check [options] MOUNT - - Reports status of the specified mount against best practices and pending - failures. This is an informative command and not all recommendations will - apply to all mounts; consider using a configuration file to tune the - executed health checks. - - To check the pki-root mount with default configuration: - - $ vault pki health-check pki-root - - To specify a configuration: - - $ vault pki health-check -health-config=mycorp-root.json /pki-root - - Return codes indicate failure type: - - 0 - Everything is good. - 1 - Usage error (check CLI parameters). 
- 2 - Informational message from a health check. - 3 - Warning message from a health check. - 4 - Critical message from a health check. - 5 - A version mismatch between health check and Vault Server occurred, - preventing one or more health checks from being run. - 6 - A permission denied message was returned from Vault Server for - one or more health checks. - -For more detailed information, refer to the online documentation about the -vault pki health-check command. - -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *PKIHealthCheckCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - f := set.NewFlagSet("Command Options") - - f.StringVar(&StringVar{ - Name: "health-config", - Target: &c.flagConfig, - Default: "", - EnvVar: "", - Usage: "Path to JSON configuration file to modify health check execution and parameters.", - }) - - f.StringVar(&StringVar{ - Name: "return-indicator", - Target: &c.flagReturnIndicator, - Default: "default", - EnvVar: "", - Completion: complete.PredictSet("default", "informational", "warning", "critical", "permission"), - Usage: `Behavior of the return value: - - permission, for exiting with a non-zero code when the tool lacks - permissions or has a version mismatch with the server; - - critical, for exiting with a non-zero code when a check returns a - critical status in addition to the above; - - warning, for exiting with a non-zero status when a check returns a - warning status in addition to the above; - - informational, for exiting with a non-zero status when a check returns - an informational status in addition to the above; - - default, for the default behavior based on severity of message and - only returning a zero exit status when all checks have passed - and no execution errors have occurred. 
- `, - }) - - f.BoolVar(&BoolVar{ - Name: "default-disabled", - Target: &c.flagDefaultDisabled, - Default: false, - EnvVar: "", - Usage: `When specified, results in all health checks being disabled by -default unless enabled by the configuration file explicitly.`, - }) - - f.BoolVar(&BoolVar{ - Name: "list", - Target: &c.flagList, - Default: false, - EnvVar: "", - Usage: `When specified, no health checks are run, but all known health -checks are printed.`, - }) - - return set -} - -func (c *PKIHealthCheckCommand) isValidRetIndicator() bool { - switch c.flagReturnIndicator { - case "", "default", "informational", "warning", "critical", "permission": - return true - default: - return false - } -} - -func (c *PKIHealthCheckCommand) AutocompleteArgs() complete.Predictor { - // Return an anything predictor here, similar to `vault write`. We - // don't know what values are valid for the mount path. - return complete.PredictAnything -} - -func (c *PKIHealthCheckCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *PKIHealthCheckCommand) Run(args []string) int { - // Parse and validate the arguments. 
- f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return pkiRetUsage - } - - args = f.Args() - if !c.flagList && len(args) < 1 { - c.UI.Error("Not enough arguments (expected mount path, got nothing)") - return pkiRetUsage - } else if !c.flagList && len(args) > 1 { - c.UI.Error(fmt.Sprintf("Too many arguments (expected only mount path, got %d arguments)", len(args))) - for _, arg := range args { - if strings.HasPrefix(arg, "-") { - c.UI.Warn(fmt.Sprintf("Options (%v) must be specified before positional arguments (%v)", arg, args[0])) - break - } - } - return pkiRetUsage - } - - if !c.isValidRetIndicator() { - c.UI.Error(fmt.Sprintf("Invalid flag -return-indicator=%v; known options are default, informational, warning, critical, and permission", c.flagReturnIndicator)) - return pkiRetUsage - } - - // Setup the client and the executor. - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return pkiRetUsage - } - - // When listing is enabled, we lack an argument here, but do not contact - // the server at all, so we're safe to use a hard-coded default here. 
- pkiPath := "" - if len(args) == 1 { - pkiPath = args[0] - } - - mount := sanitizePath(pkiPath) - executor := healthcheck.NewExecutor(client, mount) - executor.AddCheck(healthcheck.NewCAValidityPeriodCheck()) - executor.AddCheck(healthcheck.NewCRLValidityPeriodCheck()) - executor.AddCheck(healthcheck.NewHardwareBackedRootCheck()) - executor.AddCheck(healthcheck.NewRootIssuedLeavesCheck()) - executor.AddCheck(healthcheck.NewRoleAllowsLocalhostCheck()) - executor.AddCheck(healthcheck.NewRoleAllowsGlobWildcardsCheck()) - executor.AddCheck(healthcheck.NewRoleNoStoreFalseCheck()) - executor.AddCheck(healthcheck.NewAuditVisibilityCheck()) - executor.AddCheck(healthcheck.NewAllowIfModifiedSinceCheck()) - executor.AddCheck(healthcheck.NewEnableAutoTidyCheck()) - executor.AddCheck(healthcheck.NewTidyLastRunCheck()) - executor.AddCheck(healthcheck.NewTooManyCertsCheck()) - executor.AddCheck(healthcheck.NewEnableAcmeIssuance()) - executor.AddCheck(healthcheck.NewAllowAcmeHeaders()) - if c.flagDefaultDisabled { - executor.DefaultEnabled = false - } - - // Handle listing, if necessary. - if c.flagList { - uiFormat := Format(c.UI) - if uiFormat == "yaml" { - c.UI.Error("YAML output format is not supported by the --list command") - return pkiRetUsage - } - - if uiFormat != "json" { - c.UI.Output("Default health check config:") - } - config := map[string]map[string]interface{}{} - for _, checker := range executor.Checkers { - config[checker.Name()] = checker.DefaultConfig() - } - - marshaled, err := json.MarshalIndent(config, "", " ") - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to marshal default config for check: %v", err)) - return pkiRetUsage - } - - c.UI.Output(string(marshaled)) - return pkiRetOK - } - - // Handle config merging. 
- external_config := map[string]interface{}{} - if c.flagConfig != "" { - contents, err := os.Open(c.flagConfig) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to read configuration file %v: %v", c.flagConfig, err)) - return pkiRetUsage - } - - decoder := json.NewDecoder(contents) - decoder.UseNumber() // Use json.Number instead of float64 values as we are decoding to an interface{}. - - if err := decoder.Decode(&external_config); err != nil { - c.UI.Error(fmt.Sprintf("Failed to parse configuration file %v: %v", c.flagConfig, err)) - return pkiRetUsage - } - } - - if err := executor.BuildConfig(external_config); err != nil { - c.UI.Error(fmt.Sprintf("Failed to build health check configuration: %v", err)) - return pkiRetUsage - } - - // Run the health checks. - results, err := executor.Execute() - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to run health check: %v", err)) - return pkiRetUsage - } - - // Display the output. - if err := c.outputResults(executor, results); err != nil { - c.UI.Error(fmt.Sprintf("Failed to render results for display: %v", err)) - } - - // Select an appropriate return code. - return c.selectRetCode(results) -} - -func (c *PKIHealthCheckCommand) outputResults(e *healthcheck.Executor, results map[string][]*healthcheck.Result) error { - switch Format(c.UI) { - case "", "table": - return c.outputResultsTable(e, results) - case "json": - return c.outputResultsJSON(results) - case "yaml": - return c.outputResultsYAML(results) - default: - return fmt.Errorf("unknown output format: %v", Format(c.UI)) - } -} - -func (c *PKIHealthCheckCommand) outputResultsTable(e *healthcheck.Executor, results map[string][]*healthcheck.Result) error { - // Iterate in checker order to ensure stable output. 
- for _, checker := range e.Checkers { - if !checker.IsEnabled() { - continue - } - - scanner := checker.Name() - findings := results[scanner] - - c.UI.Output(scanner) - c.UI.Output(strings.Repeat("-", len(scanner))) - data := []string{"status" + hopeDelim + "endpoint" + hopeDelim + "message"} - for _, finding := range findings { - row := []string{ - finding.StatusDisplay, - finding.Endpoint, - finding.Message, - } - data = append(data, strings.Join(row, hopeDelim)) - } - - c.UI.Output(tableOutput(data, &columnize.Config{ - Delim: hopeDelim, - })) - c.UI.Output("\n") - } - - return nil -} - -func (c *PKIHealthCheckCommand) outputResultsJSON(results map[string][]*healthcheck.Result) error { - bytes, err := json.MarshalIndent(results, "", " ") - if err != nil { - return err - } - - c.UI.Output(string(bytes)) - return nil -} - -func (c *PKIHealthCheckCommand) outputResultsYAML(results map[string][]*healthcheck.Result) error { - bytes, err := yaml.Marshal(results) - if err != nil { - return err - } - - c.UI.Output(string(bytes)) - return nil -} - -func (c *PKIHealthCheckCommand) selectRetCode(results map[string][]*healthcheck.Result) int { - var highestResult healthcheck.ResultStatus = healthcheck.ResultNotApplicable - for _, findings := range results { - for _, finding := range findings { - if finding.Status > highestResult { - highestResult = finding.Status - } - } - } - - cutOff := healthcheck.ResultInformational - switch c.flagReturnIndicator { - case "", "default", "informational": - case "permission": - cutOff = healthcheck.ResultInvalidVersion - case "critical": - cutOff = healthcheck.ResultCritical - case "warning": - cutOff = healthcheck.ResultWarning - } - - if highestResult >= cutOff { - return int(highestResult) - } - - return pkiRetOK -} diff --git a/command/pki_health_check_test.go b/command/pki_health_check_test.go deleted file mode 100644 index 93d231593ee78..0000000000000 --- a/command/pki_health_check_test.go +++ /dev/null @@ -1,694 +0,0 @@ -// 
Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "bytes" - "encoding/json" - "fmt" - "net/url" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/healthcheck" - - "github.com/mitchellh/cli" - "github.com/stretchr/testify/require" -) - -func TestPKIHC_AllGood(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - AuditNonHMACRequestKeys: healthcheck.VisibleReqParams, - AuditNonHMACResponseKeys: healthcheck.VisibleRespParams, - PassthroughRequestHeaders: []string{"If-Modified-Since"}, - AllowedResponseHeaders: []string{"Last-Modified", "Replay-Nonce", "Link", "Location"}, - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X1", - "ttl": "3650d", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - if _, err := client.Logical().Read("pki/crl/rotate"); err != nil { - t.Fatalf("failed to rotate CRLs: %v", err) - } - - if _, err := client.Logical().Write("pki/roles/testing", map[string]interface{}{ - "allow_any_name": true, - "no_store": true, - }); err != nil { - t.Fatalf("failed to write role: %v", err) - } - - if _, err := client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ - "enabled": true, - "tidy_cert_store": true, - }); err != nil { - t.Fatalf("failed to write auto-tidy config: %v", err) - } - - if _, err := client.Logical().Write("pki/tidy", map[string]interface{}{ - "tidy_cert_store": true, - }); err != nil { - t.Fatalf("failed to run tidy: %v", err) - } - - path, err := url.Parse(client.Address()) - require.NoError(t, err, "failed parsing client address") - - if _, err := 
client.Logical().Write("pki/config/cluster", map[string]interface{}{ - "path": path.JoinPath("/v1/", "pki/").String(), - }); err != nil { - t.Fatalf("failed to update local cluster: %v", err) - } - - if _, err := client.Logical().Write("pki/config/acme", map[string]interface{}{ - "enabled": "true", - }); err != nil { - t.Fatalf("failed to update acme config: %v", err) - } - - _, _, results := execPKIHC(t, client, true) - - validateExpectedPKIHC(t, expectedAllGood, results) -} - -func TestPKIHC_AllBad(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X1", - "ttl": "35d", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - if _, err := client.Logical().Write("pki/config/crl", map[string]interface{}{ - "expiry": "5s", - }); err != nil { - t.Fatalf("failed to issue leaf cert: %v", err) - } - - if _, err := client.Logical().Read("pki/crl/rotate"); err != nil { - t.Fatalf("failed to rotate CRLs: %v", err) - } - - time.Sleep(5 * time.Second) - - if _, err := client.Logical().Write("pki/roles/testing", map[string]interface{}{ - "allow_localhost": true, - "allowed_domains": "*.example.com", - "allow_glob_domains": true, - "allow_wildcard_certificates": true, - "no_store": false, - "key_type": "ec", - "ttl": "30d", - }); err != nil { - t.Fatalf("failed to write role: %v", err) - } - - if _, err := client.Logical().Write("pki/issue/testing", map[string]interface{}{ - "common_name": "something.example.com", - }); err != nil { - t.Fatalf("failed to issue leaf cert: %v", err) - } - - if _, err := client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ - "enabled": false, - "tidy_cert_store": false, - }); err != nil { - 
t.Fatalf("failed to write auto-tidy config: %v", err) - } - - _, _, results := execPKIHC(t, client, true) - - validateExpectedPKIHC(t, expectedAllBad, results) -} - -func TestPKIHC_OnlyIssuer(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X1", - "ttl": "35d", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - _, _, results := execPKIHC(t, client, true) - validateExpectedPKIHC(t, expectedEmptyWithIssuer, results) -} - -func TestPKIHC_NoMount(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - code, message, _ := execPKIHC(t, client, false) - if code != 1 { - t.Fatalf("Expected return code 1 from invocation on non-existent mount, got %v\nOutput: %v", code, message) - } - - if !strings.Contains(message, "route entry not found") { - t.Fatalf("Expected failure to talk about missing route entry, got exit code %v\nOutput: %v", code, message) - } -} - -func TestPKIHC_ExpectedEmptyMount(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - code, message, _ := execPKIHC(t, client, false) - if code != 1 { - t.Fatalf("Expected return code 1 from invocation on empty mount, got %v\nOutput: %v", code, message) - } - - if !strings.Contains(message, "lacks any configured issuers,") { - t.Fatalf("Expected failure to talk about no issuers, got exit code %v\nOutput: %v", code, message) - } -} - -func TestPKIHC_NoPerm(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := 
client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X1", - "ttl": "35d", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - if _, err := client.Logical().Write("pki/config/crl", map[string]interface{}{ - "expiry": "5s", - }); err != nil { - t.Fatalf("failed to issue leaf cert: %v", err) - } - - if _, err := client.Logical().Read("pki/crl/rotate"); err != nil { - t.Fatalf("failed to rotate CRLs: %v", err) - } - - time.Sleep(5 * time.Second) - - if _, err := client.Logical().Write("pki/roles/testing", map[string]interface{}{ - "allow_localhost": true, - "allowed_domains": "*.example.com", - "allow_glob_domains": true, - "allow_wildcard_certificates": true, - "no_store": false, - "key_type": "ec", - "ttl": "30d", - }); err != nil { - t.Fatalf("failed to write role: %v", err) - } - - if _, err := client.Logical().Write("pki/issue/testing", map[string]interface{}{ - "common_name": "something.example.com", - }); err != nil { - t.Fatalf("failed to issue leaf cert: %v", err) - } - - if _, err := client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ - "enabled": false, - "tidy_cert_store": false, - }); err != nil { - t.Fatalf("failed to write auto-tidy config: %v", err) - } - - // Remove client token. 
- client.ClearToken() - - _, _, results := execPKIHC(t, client, true) - validateExpectedPKIHC(t, expectedNoPerm, results) -} - -func testPKIHealthCheckCommand(tb testing.TB) (*cli.MockUi, *PKIHealthCheckCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &PKIHealthCheckCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } -} - -func execPKIHC(t *testing.T, client *api.Client, ok bool) (int, string, map[string][]map[string]interface{}) { - t.Helper() - - stdout := bytes.NewBuffer(nil) - stderr := bytes.NewBuffer(nil) - runOpts := &RunOptions{ - Stdout: stdout, - Stderr: stderr, - Client: client, - } - - code := RunCustom([]string{"pki", "health-check", "-format=json", "pki"}, runOpts) - combined := stdout.String() + stderr.String() - - var results map[string][]map[string]interface{} - if err := json.Unmarshal([]byte(combined), &results); err != nil { - if ok { - t.Fatalf("failed to decode json (ret %v): %v\njson:\n%v", code, err, combined) - } - } - - t.Log(combined) - - return code, combined, results -} - -func validateExpectedPKIHC(t *testing.T, expected, results map[string][]map[string]interface{}) { - t.Helper() - - for test, subtest := range expected { - actual, ok := results[test] - require.True(t, ok, fmt.Sprintf("expected top-level test %v to be present", test)) - - if subtest == nil { - continue - } - - require.NotNil(t, actual, fmt.Sprintf("expected top-level test %v to be non-empty; wanted wireframe format %v", test, subtest)) - require.Equal(t, len(subtest), len(actual), fmt.Sprintf("top-level test %v has different number of results %v in wireframe, %v in test output\nwireframe: %v\noutput: %v\n", test, len(subtest), len(actual), subtest, actual)) - - for index, subset := range subtest { - for key, value := range subset { - a_value, present := actual[index][key] - require.True(t, present) - if value != nil { - require.Equal(t, value, a_value, fmt.Sprintf("in test: %v / result %v - when validating key %v\nWanted: %v\nGot: %v", test, index, 
key, subset, actual[index])) - } - } - } - } - - for name := range results { - if _, present := expected[name]; !present { - t.Fatalf("got unexpected health check: %v\n%v", name, results[name]) - } - } -} - -var expectedAllGood = map[string][]map[string]interface{}{ - "ca_validity_period": { - { - "status": "ok", - }, - }, - "crl_validity_period": { - { - "status": "ok", - }, - { - "status": "ok", - }, - }, - "allow_acme_headers": { - { - "status": "ok", - }, - }, - "allow_if_modified_since": { - { - "status": "ok", - }, - }, - "audit_visibility": { - { - "status": "ok", - }, - }, - "enable_acme_issuance": { - { - "status": "ok", - }, - }, - "enable_auto_tidy": { - { - "status": "ok", - }, - }, - "role_allows_glob_wildcards": { - { - "status": "ok", - }, - }, - "role_allows_localhost": { - { - "status": "ok", - }, - }, - "role_no_store_false": { - { - "status": "ok", - }, - }, - "root_issued_leaves": { - { - "status": "ok", - }, - }, - "tidy_last_run": { - { - "status": "ok", - }, - }, - "too_many_certs": { - { - "status": "ok", - }, - }, -} - -var expectedAllBad = map[string][]map[string]interface{}{ - "ca_validity_period": { - { - "status": "critical", - }, - }, - "crl_validity_period": { - { - "status": "critical", - }, - { - "status": "critical", - }, - }, - "allow_acme_headers": { - { - "status": "not_applicable", - }, - }, - "allow_if_modified_since": { - { - "status": "informational", - }, - }, - "audit_visibility": { - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": 
"informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - { - "status": "informational", - }, - }, - "enable_acme_issuance": { - { - "status": "not_applicable", - }, - }, - "enable_auto_tidy": { - { - "status": "informational", - }, - }, - "role_allows_glob_wildcards": { - { - "status": "warning", - }, - }, - "role_allows_localhost": { - { - "status": "warning", - }, - }, - "role_no_store_false": { - { - "status": "warning", - }, - }, - "root_issued_leaves": { - { - "status": "warning", - }, - }, - "tidy_last_run": { - { - "status": "critical", - }, - }, - "too_many_certs": { - { - "status": "ok", - }, - }, -} - -var expectedEmptyWithIssuer = map[string][]map[string]interface{}{ - "ca_validity_period": { - { - "status": "critical", - }, - }, - "crl_validity_period": { - { - "status": "ok", - }, - { - "status": "ok", - }, - }, - "allow_acme_headers": { - { - "status": "not_applicable", - }, - }, - "allow_if_modified_since": nil, - "audit_visibility": nil, - "enable_acme_issuance": { - { - "status": "not_applicable", - }, - }, - "enable_auto_tidy": { - { - "status": "informational", - }, - }, - "role_allows_glob_wildcards": nil, - "role_allows_localhost": nil, - "role_no_store_false": nil, - "root_issued_leaves": { - { - "status": "ok", - }, - }, - "tidy_last_run": { - { - "status": "critical", - }, - }, - "too_many_certs": { - { - "status": "ok", - }, - }, -} - -var expectedNoPerm = map[string][]map[string]interface{}{ - "ca_validity_period": { - { - "status": "critical", - }, 
- }, - "crl_validity_period": { - { - "status": "insufficient_permissions", - }, - { - "status": "critical", - }, - { - "status": "critical", - }, - }, - "allow_acme_headers": { - { - "status": "insufficient_permissions", - }, - }, - "allow_if_modified_since": nil, - "audit_visibility": nil, - "enable_acme_issuance": { - { - "status": "insufficient_permissions", - }, - }, - "enable_auto_tidy": { - { - "status": "insufficient_permissions", - }, - }, - "role_allows_glob_wildcards": { - { - "status": "insufficient_permissions", - }, - }, - "role_allows_localhost": { - { - "status": "insufficient_permissions", - }, - }, - "role_no_store_false": { - { - "status": "insufficient_permissions", - }, - }, - "root_issued_leaves": { - { - "status": "insufficient_permissions", - }, - }, - "tidy_last_run": { - { - "status": "insufficient_permissions", - }, - }, - "too_many_certs": { - { - "status": "insufficient_permissions", - }, - }, -} diff --git a/command/pki_issue_intermediate.go b/command/pki_issue_intermediate.go deleted file mode 100644 index fe16fdaaca91a..0000000000000 --- a/command/pki_issue_intermediate.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "context" - "fmt" - "io" - "os" - paths "path" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/posener/complete" -) - -type PKIIssueCACommand struct { - *BaseCommand - - flagConfig string - flagReturnIndicator string - flagDefaultDisabled bool - flagList bool - - flagKeyStorageSource string - flagNewIssuerName string -} - -func (c *PKIIssueCACommand) Synopsis() string { - return "Given a parent certificate, and a list of generation parameters, creates an issuer on a specified mount" -} - -func (c *PKIIssueCACommand) Help() string { - helpText := ` -Usage: vault pki issue PARENT CHILD_MOUNT options - -PARENT is the fully qualified path of the Certificate Authority in vault which will issue the new intermediate certificate. - -CHILD_MOUNT is the path of the mount in vault where the new issuer is saved. - -options are the superset of the options passed to generate/intermediate and sign-intermediate commands. At least one option must be set. - -This command creates a intermediate certificate authority certificate signed by the parent in the CHILD_MOUNT. - -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *PKIIssueCACommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - f := set.NewFlagSet("Command Options") - - f.StringVar(&StringVar{ - Name: "type", - Target: &c.flagKeyStorageSource, - Default: "internal", - EnvVar: "", - Usage: `Options are "existing" - to use an existing key inside vault, "internal" - to generate a new key inside vault, or "kms" - to link to an external key. 
Exported keys are not available through this API.`, - Completion: complete.PredictSet("internal", "existing", "kms"), - }) - - f.StringVar(&StringVar{ - Name: "issuer_name", - Target: &c.flagNewIssuerName, - Default: "", - EnvVar: "", - Usage: `If present, the newly created issuer will be given this name.`, - }) - - return set -} - -func (c *PKIIssueCACommand) Run(args []string) int { - // Parse Args - f := c.Flags() - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - args = f.Args() - - if len(args) < 3 { - c.UI.Error("Not enough arguments expected parent issuer and child-mount location and some key_value argument") - return 1 - } - - stdin := (io.Reader)(os.Stdin) - data, err := parseArgsData(stdin, args[2:]) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) - return 1 - } - - parentMountIssuer := sanitizePath(args[0]) // /pki/issuer/default - - intermediateMount := sanitizePath(args[1]) - - return pkiIssue(c.BaseCommand, parentMountIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data) -} - -func pkiIssue(c *BaseCommand, parentMountIssuer string, intermediateMount string, flagNewIssuerName string, flagKeyStorageSource string, data map[string]interface{}) int { - // Check We Have a Client - client, err := c.Client() - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to obtain client: %v", err)) - return 1 - } - - // Sanity Check the Parent Issuer - if !strings.Contains(parentMountIssuer, "/issuer/") { - c.UI.Error(fmt.Sprintf("Parent Issuer %v is Not a PKI Issuer Path of the format /mount/issuer/issuer-ref", parentMountIssuer)) - return 1 - } - _, err = readIssuer(client, parentMountIssuer) - if err != nil { - c.UI.Error(fmt.Sprintf("Unable to access parent issuer %v: %v", parentMountIssuer, err)) - return 1 - } - - // Set-up Failure State (Immediately Before First Write Call) - failureState := inCaseOfFailure{ - intermediateMount: intermediateMount, - parentMount: 
strings.Split(parentMountIssuer, "/issuer/")[0], - parentIssuer: parentMountIssuer, - newName: flagNewIssuerName, - } - - // Generate Certificate Signing Request - csrResp, err := client.Logical().Write(intermediateMount+"/intermediate/generate/"+flagKeyStorageSource, data) - if err != nil { - if strings.Contains(err.Error(), "no handler for route") { // Mount Given Does Not Exist - c.UI.Error(fmt.Sprintf("Given Intermediate Mount %v Does Not Exist: %v", intermediateMount, err)) - } else if strings.Contains(err.Error(), "unsupported path") { // Expected if Not a PKI Mount - c.UI.Error(fmt.Sprintf("Given Intermeidate Mount %v Is Not a PKI Mount: %v", intermediateMount, err)) - } else { - c.UI.Error(fmt.Sprintf("Failled to Generate Intermediate CSR on %v: %v", intermediateMount, err)) - } - return 1 - } - // Parse CSR Response, Also Verifies that this is a PKI Mount - // (e.g. calling the above call on cubbyhole/ won't return an error response) - csrPemRaw, present := csrResp.Data["csr"] - if !present { - c.UI.Error(fmt.Sprintf("Failed to Generate Intermediate CSR on %v, got response: %v", intermediateMount, csrResp)) - return 1 - } - keyIdRaw, present := csrResp.Data["key_id"] - if !present && flagKeyStorageSource == "internal" { - c.UI.Error(fmt.Sprintf("Failed to Generate Key on %v, got response: %v", intermediateMount, csrResp)) - return 1 - } - - // If that all Parses, then we've successfully generated a CSR! Save It (and the Key-ID) - failureState.csrGenerated = true - if flagKeyStorageSource == "internal" { - failureState.createdKeyId = keyIdRaw.(string) - } - csr := csrPemRaw.(string) - failureState.csr = csr - data["csr"] = csr - - // Next, Sign the CSR - rootResp, err := client.Logical().Write(parentMountIssuer+"/sign-intermediate", data) - if err != nil { - c.UI.Error(failureState.generateFailureMessage()) - c.UI.Error(fmt.Sprintf("Error Signing Intermiate On %v", err)) - return 1 - } - // Success! 
Save Our Progress (and Parse the Response) - failureState.csrSigned = true - serialNumber := rootResp.Data["serial_number"].(string) - failureState.certSerialNumber = serialNumber - - caChain := rootResp.Data["ca_chain"].([]interface{}) - caChainPemBundle := "" - for _, cert := range caChain { - caChainPemBundle += cert.(string) + "\n" - } - failureState.caChain = caChainPemBundle - - // Next Import Certificate - certificate := rootResp.Data["certificate"].(string) - issuerId, err := importIssuerWithName(client, intermediateMount, certificate, flagNewIssuerName) - failureState.certIssuerId = issuerId - if err != nil { - if strings.Contains(err.Error(), "error naming issuer") { - failureState.certImported = true - c.UI.Error(failureState.generateFailureMessage()) - c.UI.Error(fmt.Sprintf("Error Naming Newly Imported Issuer: %v", err)) - return 1 - } else { - c.UI.Error(failureState.generateFailureMessage()) - c.UI.Error(fmt.Sprintf("Error Importing Into %v Newly Created Issuer %v: %v", intermediateMount, certificate, err)) - return 1 - } - } - failureState.certImported = true - - // Then Import Issuing Certificate - issuingCa := rootResp.Data["issuing_ca"].(string) - _, parentIssuerName := paths.Split(parentMountIssuer) - _, err = importIssuerWithName(client, intermediateMount, issuingCa, parentIssuerName) - if err != nil { - if strings.Contains(err.Error(), "error naming issuer") { - c.UI.Warn(fmt.Sprintf("Unable to Set Name on Parent Cert from %v Imported Into %v with serial %v, err: %v", parentIssuerName, intermediateMount, serialNumber, err)) - } else { - c.UI.Error(failureState.generateFailureMessage()) - c.UI.Error(fmt.Sprintf("Error Importing Into %v Newly Created Issuer %v: %v", intermediateMount, certificate, err)) - return 1 - } - } - - // Finally Import CA_Chain (just in case there's more information) - if len(caChain) > 2 { // We've already imported parent cert and newly issued cert above - importData := map[string]interface{}{ - "pem_bundle": 
caChainPemBundle, - } - _, err := client.Logical().Write(intermediateMount+"/issuers/import/cert", importData) - if err != nil { - c.UI.Error(failureState.generateFailureMessage()) - c.UI.Error(fmt.Sprintf("Error Importing CaChain into %v: %v", intermediateMount, err)) - return 1 - } - } - failureState.caChainImported = true - - // Finally we read our newly issued certificate in order to tell our caller about it - readAndOutputNewCertificate(client, intermediateMount, issuerId, c) - - return 0 -} - -func readAndOutputNewCertificate(client *api.Client, intermediateMount string, issuerId string, c *BaseCommand) { - resp, err := client.Logical().Read(sanitizePath(intermediateMount + "/issuer/" + issuerId)) - if err != nil || resp == nil { - c.UI.Error(fmt.Sprintf("Error Reading Fully Imported Certificate from %v : %v", - intermediateMount+"/issuer/"+issuerId, err)) - return - } - - OutputSecret(c.UI, resp) -} - -func importIssuerWithName(client *api.Client, mount string, bundle string, name string) (issuerUUID string, err error) { - importData := map[string]interface{}{ - "pem_bundle": bundle, - } - writeResp, err := client.Logical().Write(mount+"/issuers/import/cert", importData) - if err != nil { - return "", err - } - mapping := writeResp.Data["mapping"].(map[string]interface{}) - if len(mapping) > 1 { - return "", fmt.Errorf("multiple issuers returned, while expected one, got %v", writeResp) - } - for issuerId := range mapping { - issuerUUID = issuerId - } - if name != "" && name != "default" { - nameReq := map[string]interface{}{ - "issuer_name": name, - } - ctx := context.Background() - _, err = client.Logical().JSONMergePatch(ctx, mount+"/issuer/"+issuerUUID, nameReq) - if err != nil { - return issuerUUID, fmt.Errorf("error naming issuer %v to %v: %v", issuerUUID, name, err) - } - } - return issuerUUID, nil -} - -type inCaseOfFailure struct { - csrGenerated bool - csrSigned bool - certImported bool - certNamed bool - caChainImported bool - - intermediateMount 
string - createdKeyId string - csr string - caChain string - parentMount string - parentIssuer string - certSerialNumber string - certIssuerId string - newName string -} - -func (state inCaseOfFailure) generateFailureMessage() string { - message := "A failure has occurred" - - if state.csrGenerated { - message += fmt.Sprintf(" after \n a Certificate Signing Request was successfully generated on mount %v", state.intermediateMount) - } - if state.csrSigned { - message += fmt.Sprintf(" and after \n that Certificate Signing Request was successfully signed by mount %v", state.parentMount) - } - if state.certImported { - message += fmt.Sprintf(" and after \n the signed certificate was reimported into mount %v , with issuerID %v", state.intermediateMount, state.certIssuerId) - } - - if state.csrGenerated { - message += "\n\nTO CONTINUE: \n" + state.toContinue() - } - if state.csrGenerated && !state.certImported { - message += "\n\nTO ABORT: \n" + state.toAbort() - } - - message += "\n" - - return message -} - -func (state inCaseOfFailure) toContinue() string { - message := "" - if !state.csrSigned { - message += fmt.Sprintf("You can continue to work with this Certificate Signing Request CSR PEM, by saving"+ - " it as `pki_int.csr`: %v \n Then call `vault write %v/sign-intermediate csr=@pki_int.csr ...` adding the "+ - "same key-value arguements as to `pki issue` (except key_type and issuer_name) to generate the certificate "+ - "and ca_chain", state.csr, state.parentIssuer) - } - if !state.certImported { - if state.caChain != "" { - message += fmt.Sprintf("The certificate chain, signed by %v, for this new certificate is: %v", state.parentIssuer, state.caChain) - } - message += fmt.Sprintf("You can continue to work with this Certificate (and chain) by saving it as "+ - "chain.pem and importing it as `vault write %v/issuers/import/cert pem_bundle=@chain.pem`", - state.intermediateMount) - } - if !state.certNamed { - issuerId := state.certIssuerId - if issuerId == "" { - 
message += fmt.Sprintf("The issuer_id is returned as the key in a key_value map from importing the " + - "certificate chain.") - issuerId = "" - } - message += fmt.Sprintf("You can name the newly imported issuer by calling `vault patch %v/issuer/%v "+ - "issuer_name=%v`", state.intermediateMount, issuerId, state.newName) - } - return message -} - -func (state inCaseOfFailure) toAbort() string { - if !state.csrGenerated || (!state.csrSigned && state.createdKeyId == "") { - return "No state was created by running this command. Try rerunning this command after resolving the error." - } - message := "" - if state.csrGenerated && state.createdKeyId != "" { - message += fmt.Sprintf(" A key, with key ID %v was created on mount %v as part of this command."+ - " If you do not with to use this key and corresponding CSR/cert, you can delete that information by calling"+ - " `vault delete %v/key/%v`", state.createdKeyId, state.intermediateMount, state.intermediateMount, state.createdKeyId) - } - if state.csrSigned { - message += fmt.Sprintf("A certificate with serial number %v was signed by mount %v as part of this command."+ - " If you do not want to use this certificate, consider revoking it by calling `vault write %v/revoke/%v`", - state.certSerialNumber, state.parentMount, state.parentMount, state.certSerialNumber) - } - //if state.certImported { - // message += fmt.Sprintf("An issuer with UUID %v was created on mount %v as part of this command. " + - // "If you do not wish to use this issuer, consider deleting it by calling `vault delete %v/issuer/%v`", - // state.certIssuerId, state.intermediateMount, state.intermediateMount, state.certIssuerId) - //} - - return message -} diff --git a/command/pki_issue_intermediate_test.go b/command/pki_issue_intermediate_test.go deleted file mode 100644 index 58f9e6271105d..0000000000000 --- a/command/pki_issue_intermediate_test.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "bytes" - "encoding/json" - "testing" - - "github.com/hashicorp/vault/api" -) - -func TestPKIIssueIntermediate(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - // Relationship Map to Create - // pki-root | pki-newroot | pki-empty - // RootX1 RootX2 RootX4 RootX3 - // | | - // ---------------------------------------------- - // v v - // IntX1 IntX2 pki-int - // | | - // v v - // IntX3 (-----------------------) IntX3 - // - // Here X1,X2 have the same name (same mount) - // RootX4 uses the same key as RootX1 (but a different common_name/subject) - // RootX3 has the same name, and is on a different mount - // RootX1 has issued IntX1; RootX3 has issued IntX2 - createComplicatedIssuerSetUpWithIssueIntermediate(t, client) - - runPkiVerifySignTests(t, client) - - runPkiListIntermediateTests(t, client) -} - -func createComplicatedIssuerSetUpWithIssueIntermediate(t *testing.T, client *api.Client) { - // Relationship Map to Create - // pki-root | pki-newroot | pki-empty - // RootX1 RootX2 RootX4 RootX3 - // | | - // ---------------------------------------------- - // v v - // IntX1 IntX2 pki-int - // | | - // v v - // IntX3 (-----------------------) IntX3 - // - // Here X1,X2 have the same name (same mount) - // RootX4 uses the same key as RootX1 (but a different common_name/subject) - // RootX3 has the same name, and is on a different mount - // RootX1 has issued IntX1; RootX3 has issued IntX2 - - if err := client.Sys().Mount("pki-root", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if err := client.Sys().Mount("pki-newroot", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if err := client.Sys().Mount("pki-int", &api.MountInput{ - Type: 
"pki", - Config: api.MountConfigInput{ - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - // Used to check handling empty list responses: Not Used for Any Issuers / Certificates - if err := client.Sys().Mount("pki-empty", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{}, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X", - "ttl": "3650d", - "issuer_name": "rootX1", - "key_name": "rootX1", - }) - if err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X", - "ttl": "3650d", - "issuer_name": "rootX2", - }) - if err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - if resp, err := client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X", - "ttl": "3650d", - "issuer_name": "rootX3", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ - "common_name": "Root X4", - "ttl": "3650d", - "issuer_name": "rootX4", - "key_ref": "rootX1", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - // Next we create the Intermediates Using the Issue Intermediate Command - stdout := bytes.NewBuffer(nil) - stderr := bytes.NewBuffer(nil) - runOpts := &RunOptions{ - Stdout: stdout, - Stderr: stderr, - Client: client, - } - - // Intermediate X1 - intX1CallArgs := []string{ - "pki", "issue", "-format=json", "-issuer_name=intX1", - "pki-root/issuer/rootX1", - "pki-int/", - "key_type=rsa", - "common_name=Int X1", - "ttl=3650d", - } - codeOut := 
RunCustom(intX1CallArgs, runOpts) - if codeOut != 0 { - t.Fatalf("error issuing intermediate X1, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) - } - - // Intermediate X2 - intX2CallArgs := []string{ - "pki", "issue", "-format=json", "-issuer_name=intX2", - "pki-newroot/issuer/rootX3", - "pki-int/", - "key_type=ec", - "common_name=Int X2", - "ttl=3650d", - } - codeOut = RunCustom(intX2CallArgs, runOpts) - if codeOut != 0 { - t.Fatalf("error issuing intermediate X2, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) - } - - // Intermediate X3 - // Clear Buffers so that we can unmarshall json of just this call - stdout = bytes.NewBuffer(nil) - stderr = bytes.NewBuffer(nil) - runOpts = &RunOptions{ - Stdout: stdout, - Stderr: stderr, - Client: client, - } - intX3OriginalCallArgs := []string{ - "pki", "issue", "-format=json", "-issuer_name=intX3", - "pki-int/issuer/intX1", - "pki-int/", - "key_type=rsa", - "common_name=Int X3", - "ttl=3650d", - } - codeOut = RunCustom(intX3OriginalCallArgs, runOpts) - if codeOut != 0 { - t.Fatalf("error issuing intermediate X3, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) - } - var intX3Resp map[string]interface{} - json.Unmarshal(stdout.Bytes(), &intX3Resp) - intX3Data := intX3Resp["data"].(map[string]interface{}) - keyId := intX3Data["key_id"].(string) - - intX3AdaptedCallArgs := []string{ - "pki", "issue", "-format=json", "-issuer_name=intX3also", "-type=existing", - "pki-int/issuer/intX2", - "pki-int/", - "key_ref=" + keyId, - "common_name=Int X3", - "ttl=3650d", - } - codeOut = RunCustom(intX3AdaptedCallArgs, runOpts) - if codeOut != 0 { - t.Fatalf("error issuing intermediate X3also, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) - } -} diff --git a/command/pki_list_intermediate.go b/command/pki_list_intermediate.go deleted file mode 100644 index c62c580806919..0000000000000 --- a/command/pki_list_intermediate.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 
HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/vault/api" - - "github.com/ghodss/yaml" - "github.com/ryanuber/columnize" -) - -type PKIListIntermediateCommand struct { - *BaseCommand - - flagConfig string - flagReturnIndicator string - flagDefaultDisabled bool - flagList bool - - flagUseNames bool - - flagSignatureMatch bool - flagIndirectSignMatch bool - flagKeyIdMatch bool - flagSubjectMatch bool - flagPathMatch bool -} - -func (c *PKIListIntermediateCommand) Synopsis() string { - return "Determine which of a list of certificates, were issued by a given parent certificate" -} - -func (c *PKIListIntermediateCommand) Help() string { - helpText := ` -Usage: vault pki list-intermediates PARENT [CHILD] [CHILD] [CHILD] ... - - Lists the set of intermediate CAs issued by this parent issuer. - - PARENT is the certificate that might be the issuer that everything should - be verified against. - - CHILD is an optional list of paths to certificates to be compared to the - PARENT, or pki mounts to look for certificates on. If CHILD is omitted - entirely, the list will be constructed from all accessible pki mounts. - - This returns a list of issuing certificates, and whether they are a match. - By default, the type of match required is whether the PARENT has the - expected subject, key_id, and could have (directly) signed this issuer. - The match criteria can be updated by changed the corresponding flag. 
- -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *PKIListIntermediateCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - f := set.NewFlagSet("Command Options") - - f.BoolVar(&BoolVar{ - Name: "subject_match", - Target: &c.flagSubjectMatch, - Default: true, - EnvVar: "", - Usage: `Whether the subject name of the potential parent cert matches the issuer name of the child cert.`, - }) - - f.BoolVar(&BoolVar{ - Name: "key_id_match", - Target: &c.flagKeyIdMatch, - Default: true, - EnvVar: "", - Usage: `Whether the subject key id (SKID) of the potential parent cert matches the authority key id (AKID) of the child cert.`, - }) - - f.BoolVar(&BoolVar{ - Name: "path_match", - Target: &c.flagPathMatch, - Default: false, - EnvVar: "", - Usage: `Whether the potential parent appears in the certificate chain field (ca_chain) of the issued cert.`, - }) - - f.BoolVar(&BoolVar{ - Name: "direct_sign", - Target: &c.flagSignatureMatch, - Default: true, - EnvVar: "", - Usage: `Whether the key of the potential parent directly signed this issued certificate.`, - }) - - f.BoolVar(&BoolVar{ - Name: "indirect_sign", - Target: &c.flagIndirectSignMatch, - Default: true, - EnvVar: "", - Usage: `Whether trusting the parent certificate is sufficient to trust the child certificate.`, - }) - - f.BoolVar(&BoolVar{ - Name: "use_names", - Target: &c.flagUseNames, - Default: false, - EnvVar: "", - Usage: `Whether the list of issuers returned is referred to by name (when it exists) rather than by uuid.`, - }) - - return set -} - -func (c *PKIListIntermediateCommand) Run(args []string) int { - f := c.Flags() - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - - if len(args) < 1 { - c.UI.Error("Not enough arguments (expected potential parent, got nothing)") - return 1 - } else if len(args) > 2 { - for _, arg := range args { - if strings.HasPrefix(arg, "-") { - c.UI.Warn(fmt.Sprintf("Options (%v) 
must be specified before positional arguments (%v)", arg, args[0])) - break - } - } - } - - client, err := c.Client() - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to obtain client: %s", err)) - return 1 - } - - issuer := sanitizePath(args[0]) - var issued []string - if len(args) > 1 { - for _, arg := range args[1:] { - cleanPath := sanitizePath(arg) - // Arg Might be a Fully Qualified Path - if strings.Contains(cleanPath, "/issuer/") || - strings.Contains(cleanPath, "/certs/") || - strings.Contains(cleanPath, "/revoked/") { - issued = append(issued, cleanPath) - } else { // Or Arg Might be a Mount - mountCaList, err := c.getIssuerListFromMount(client, arg) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - issued = append(issued, mountCaList...) - } - } - } else { - mountListRaw, err := client.Logical().Read("/sys/mounts/") - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to Read List of Mounts With Potential Issuers: %v", err)) - return 1 - } - for path, rawValueMap := range mountListRaw.Data { - valueMap := rawValueMap.(map[string]interface{}) - if valueMap["type"].(string) == "pki" { - mountCaList, err := c.getIssuerListFromMount(client, sanitizePath(path)) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - issued = append(issued, mountCaList...) 
- } - } - } - - childrenMatches := make(map[string]bool) - - constraintMap := map[string]bool{ - // This comparison isn't strictly correct, despite a standard ordering these are sets - "subject_match": c.flagSubjectMatch, - "path_match": c.flagPathMatch, - "trust_match": c.flagIndirectSignMatch, - "key_id_match": c.flagKeyIdMatch, - "signature_match": c.flagSignatureMatch, - } - - issuerResp, err := readIssuer(client, issuer) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to read parent issuer on path %s: %s", issuer, err.Error())) - return 1 - } - - for _, child := range issued { - path := sanitizePath(child) - if path != "" { - verifyResults, err := verifySignBetween(client, issuerResp, path) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to run verification on path %v: %v", path, err)) - return 1 - } - childrenMatches[path] = checkIfResultsMatchFilters(verifyResults, constraintMap) - } - } - - err = c.outputResults(childrenMatches) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - return 0 -} - -func (c *PKIListIntermediateCommand) getIssuerListFromMount(client *api.Client, mountString string) ([]string, error) { - var issuerList []string - issuerListEndpoint := sanitizePath(mountString) + "/issuers" - rawIssuersResp, err := client.Logical().List(issuerListEndpoint) - if err != nil { - return issuerList, fmt.Errorf("failed to read list of issuers within mount %v: %v", mountString, err) - } - if rawIssuersResp == nil { // No Issuers (Empty Mount) - return issuerList, nil - } - issuersMap := rawIssuersResp.Data["keys"] - certList := issuersMap.([]interface{}) - for _, certId := range certList { - identifier := certId.(string) - if c.flagUseNames { - issuerReadResp, err := client.Logical().Read(sanitizePath(mountString) + "/issuer/" + identifier) - if err != nil { - c.UI.Warn(fmt.Sprintf("Unable to Fetch Issuer to Recover Name at: %v", sanitizePath(mountString)+"/issuer/"+identifier)) - } - if issuerReadResp != nil { - issuerName := 
issuerReadResp.Data["issuer_name"].(string) - if issuerName != "" { - identifier = issuerName - } - } - } - issuerList = append(issuerList, sanitizePath(mountString)+"/issuer/"+identifier) - } - return issuerList, nil -} - -func checkIfResultsMatchFilters(verifyResults, constraintMap map[string]bool) bool { - for key, required := range constraintMap { - if required && !verifyResults[key] { - return false - } - } - return true -} - -func (c *PKIListIntermediateCommand) outputResults(results map[string]bool) error { - switch Format(c.UI) { - case "", "table": - return c.outputResultsTable(results) - case "json": - return c.outputResultsJSON(results) - case "yaml": - return c.outputResultsYAML(results) - default: - return fmt.Errorf("unknown output format: %v", Format(c.UI)) - } -} - -func (c *PKIListIntermediateCommand) outputResultsTable(results map[string]bool) error { - data := []string{"intermediate" + hopeDelim + "match?"} - for field, finding := range results { - row := field + hopeDelim + strconv.FormatBool(finding) - data = append(data, row) - } - c.UI.Output(tableOutput(data, &columnize.Config{ - Delim: hopeDelim, - })) - c.UI.Output("\n") - - return nil -} - -func (c *PKIListIntermediateCommand) outputResultsJSON(results map[string]bool) error { - bytes, err := json.MarshalIndent(results, "", " ") - if err != nil { - return err - } - - c.UI.Output(string(bytes)) - return nil -} - -func (c *PKIListIntermediateCommand) outputResultsYAML(results map[string]bool) error { - bytes, err := yaml.Marshal(results) - if err != nil { - return err - } - - c.UI.Output(string(bytes)) - return nil -} diff --git a/command/pki_list_intermediate_test.go b/command/pki_list_intermediate_test.go deleted file mode 100644 index d494c193387ac..0000000000000 --- a/command/pki_list_intermediate_test.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "strings" - "testing" - - "github.com/hashicorp/vault/api" -) - -func TestPKIListIntermediate(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - // Relationship Map to Create - // pki-root | pki-newroot | pki-empty - // RootX1 RootX2 RootX4 RootX3 - // | | - // ---------------------------------------------- - // v v - // IntX1 IntX2 pki-int - // | | - // v v - // IntX3 (-----------------------) IntX3(also) - // - // Here X1,X2 have the same name (same mount) - // RootX4 uses the same key as RootX1 (but a different common_name/subject) - // RootX3 has the same name, and is on a different mount - // RootX1 has issued IntX1; RootX3 has issued IntX2 - createComplicatedIssuerSetUp(t, client) - - runPkiListIntermediateTests(t, client) -} - -func runPkiListIntermediateTests(t *testing.T, client *api.Client) { - cases := []struct { - name string - args []string - expectedMatches map[string]bool - jsonOut bool - shouldError bool - expectErrorCont string - expectErrorNotCont string - nonJsonOutputCont string - }{ - { - "rootX1-match-everything-no-constraints", - []string{ - "pki", "list-intermediates", "-format=json", "-use_names=true", - "-subject_match=false", "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", "-path_match=false", - "pki-root/issuer/rootX1", - }, - map[string]bool{ - "pki-root/issuer/rootX1": true, - "pki-root/issuer/rootX2": true, - "pki-newroot/issuer/rootX3": true, - "pki-root/issuer/rootX4": true, - "pki-int/issuer/intX1": true, - "pki-int/issuer/intX2": true, - "pki-int/issuer/intX3": true, - "pki-int/issuer/intX3also": true, - "pki-int/issuer/rootX1": true, - "pki-int/issuer/rootX3": true, - }, - true, - false, - "", - "", - "", - }, - { - "rootX1-default-children", - []string{"pki", "list-intermediates", "-format=json", "-use_names=true", "pki-root/issuer/rootX1"}, - map[string]bool{ - "pki-root/issuer/rootX1": true, - 
"pki-root/issuer/rootX2": false, - "pki-newroot/issuer/rootX3": false, - "pki-root/issuer/rootX4": false, - "pki-int/issuer/intX1": true, - "pki-int/issuer/intX2": false, - "pki-int/issuer/intX3": false, - "pki-int/issuer/intX3also": false, - "pki-int/issuer/rootX1": true, - "pki-int/issuer/rootX3": false, - }, - true, - false, - "", - "", - "", - }, - { - "rootX1-subject-match-only", - []string{ - "pki", "list-intermediates", "-format=json", "-use_names=true", - "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", - "pki-root/issuer/rootX1", - }, - map[string]bool{ - "pki-root/issuer/rootX1": true, - "pki-root/issuer/rootX2": true, - "pki-newroot/issuer/rootX3": true, - "pki-root/issuer/rootX4": false, - "pki-int/issuer/intX1": true, - "pki-int/issuer/intX2": true, - "pki-int/issuer/intX3": false, - "pki-int/issuer/intX3also": false, - "pki-int/issuer/rootX1": true, - "pki-int/issuer/rootX3": true, - }, - true, - false, - "", - "", - "", - }, - { - "rootX1-in-path", - []string{ - "pki", "list-intermediates", "-format=json", "-use_names=true", - "-subject_match=false", "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", "-path_match=true", - "pki-root/issuer/rootX1", - }, - map[string]bool{ - "pki-root/issuer/rootX1": true, - "pki-root/issuer/rootX2": false, - "pki-newroot/issuer/rootX3": false, - "pki-root/issuer/rootX4": false, - "pki-int/issuer/intX1": true, - "pki-int/issuer/intX2": false, - "pki-int/issuer/intX3": true, - "pki-int/issuer/intX3also": false, - "pki-int/issuer/rootX1": true, - "pki-int/issuer/rootX3": false, - }, - true, - false, - "", - "", - "", - }, - { - "rootX1-only-int-mount", - []string{ - "pki", "list-intermediates", "-format=json", "-use_names=true", - "-subject_match=false", "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", "-path_match=true", - "pki-root/issuer/rootX1", "pki-int/", - }, - map[string]bool{ - "pki-int/issuer/intX1": true, - "pki-int/issuer/intX2": false, - 
"pki-int/issuer/intX3": true, - "pki-int/issuer/intX3also": false, - "pki-int/issuer/rootX1": true, - "pki-int/issuer/rootX3": false, - }, - true, - false, - "", - "", - "", - }, - { - "rootX1-subject-match-root-mounts-only", - []string{ - "pki", "list-intermediates", "-format=json", "-use_names=true", - "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", - "pki-root/issuer/rootX1", "pki-root/", "pki-newroot", "pki-empty", - }, - map[string]bool{ - "pki-root/issuer/rootX1": true, - "pki-root/issuer/rootX2": true, - "pki-newroot/issuer/rootX3": true, - "pki-root/issuer/rootX4": false, - }, - true, - false, - "", - "", - "", - }, - { - "rootX1-subject-match-these-certs-only", - []string{ - "pki", "list-intermediates", "-format=json", "-use_names=true", - "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", - "pki-root/issuer/rootX1", "pki-root/issuer/rootX2", "pki-newroot/issuer/rootX3", "pki-root/issuer/rootX4", - }, - map[string]bool{ - "pki-root/issuer/rootX2": true, - "pki-newroot/issuer/rootX3": true, - "pki-root/issuer/rootX4": false, - }, - true, - false, - "", - "", - "", - }, - } - for _, testCase := range cases { - var errString string - var results map[string]interface{} - var stdOut string - - if testCase.jsonOut { - results, errString = execPKIVerifyJson(t, client, false, testCase.shouldError, testCase.args) - } else { - stdOut, errString = execPKIVerifyNonJson(t, client, testCase.shouldError, testCase.args) - } - - // Verify Error Behavior - if testCase.shouldError { - if errString == "" { - t.Fatalf("Expected error in Testcase %s : no error produced, got results %s", testCase.name, results) - } - if testCase.expectErrorCont != "" && !strings.Contains(errString, testCase.expectErrorCont) { - t.Fatalf("Expected error in Testcase %s to contain %s, but got error %s", testCase.name, testCase.expectErrorCont, errString) - } - if testCase.expectErrorNotCont != "" && strings.Contains(errString, testCase.expectErrorNotCont) { - 
t.Fatalf("Expected error in Testcase %s to not contain %s, but got error %s", testCase.name, testCase.expectErrorNotCont, errString) - } - } else { - if errString != "" { - t.Fatalf("Error in Testcase %s : no error expected, but got error: %s", testCase.name, errString) - } - } - - // Verify Output - if testCase.jsonOut { - isMatch, errString := verifyExpectedJson(testCase.expectedMatches, results) - if !isMatch { - t.Fatalf("Expected Results for Testcase %s, do not match returned results %s", testCase.name, errString) - } - } else { - if !strings.Contains(stdOut, testCase.nonJsonOutputCont) { - t.Fatalf("Expected standard output for Testcase %s to contain %s, but got %s", testCase.name, testCase.nonJsonOutputCont, stdOut) - } - } - - } -} diff --git a/command/pki_reissue_intermediate.go b/command/pki_reissue_intermediate.go deleted file mode 100644 index 852c0c0f1d24b..0000000000000 --- a/command/pki_reissue_intermediate.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/x509" - "encoding/hex" - "fmt" - "io" - "net" - "net/url" - "os" - "strings" - - "github.com/posener/complete" -) - -type PKIReIssueCACommand struct { - *BaseCommand - - flagConfig string - flagReturnIndicator string - flagDefaultDisabled bool - flagList bool - - flagKeyStorageSource string - flagNewIssuerName string -} - -func (c *PKIReIssueCACommand) Synopsis() string { - return "Uses a parent certificate and a template certificate to create a new issuer on a child mount" -} - -func (c *PKIReIssueCACommand) Help() string { - helpText := ` -Usage: vault pki reissue PARENT TEMPLATE CHILD_MOUNT options -` - return strings.TrimSpace(helpText) -} - -func (c *PKIReIssueCACommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - f := set.NewFlagSet("Command Options") - - f.StringVar(&StringVar{ - Name: "type", - Target: &c.flagKeyStorageSource, - Default: "internal", - EnvVar: "", - Usage: `Options are “existing” - to use an existing key inside vault, “internal” - to generate a new key inside vault, or “kms” - to link to an external key. 
Exported keys are not available through this API.`, - Completion: complete.PredictSet("internal", "existing", "kms"), - }) - - f.StringVar(&StringVar{ - Name: "issuer_name", - Target: &c.flagNewIssuerName, - Default: "", - EnvVar: "", - Usage: `If present, the newly created issuer will be given this name`, - }) - - return set -} - -func (c *PKIReIssueCACommand) Run(args []string) int { - // Parse Args - f := c.Flags() - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - args = f.Args() - - if len(args) < 3 { - c.UI.Error("Not enough arguments: expected parent issuer and child-mount location and some key_value argument") - return 1 - } - - stdin := (io.Reader)(os.Stdin) - userData, err := parseArgsData(stdin, args[3:]) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) - return 1 - } - - // Check We Have a Client - client, err := c.Client() - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to obtain client: %v", err)) - return 1 - } - - parentIssuer := sanitizePath(args[0]) // /pki/issuer/default - templateIssuer := sanitizePath(args[1]) - intermediateMount := sanitizePath(args[2]) - - templateIssuerBundle, err := readIssuer(client, templateIssuer) - if err != nil { - c.UI.Error(fmt.Sprintf("Error fetching template certificate %v : %v", templateIssuer, err)) - return 1 - } - certificate := templateIssuerBundle.certificate - - useExistingKey := c.flagKeyStorageSource == "existing" - keyRef := "" - if useExistingKey { - keyRef = templateIssuerBundle.keyId - - if keyRef == "" { - c.UI.Error(fmt.Sprintf("Template issuer %s did not have a key id field set in response which is required", templateIssuer)) - return 1 - } - } - - templateData, err := parseTemplateCertificate(*certificate, useExistingKey, keyRef) - data := updateTemplateWithData(templateData, userData) - - return pkiIssue(c.BaseCommand, parentIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data) -} - -func 
updateTemplateWithData(template map[string]interface{}, changes map[string]interface{}) map[string]interface{} { - data := map[string]interface{}{} - - for key, value := range template { - data[key] = value - } - - // ttl and not_after set the same thing. Delete template ttl if using not_after: - if _, ok := changes["not_after"]; ok { - delete(data, "ttl") - } - - // If we are updating the key_type, do not set key_bits - if _, ok := changes["key_type"]; ok && changes["key_type"] != template["key_type"] { - delete(data, "key_bits") - } - - for key, value := range changes { - data[key] = value - } - - return data -} - -func parseTemplateCertificate(certificate x509.Certificate, useExistingKey bool, keyRef string) (templateData map[string]interface{}, err error) { - // Generate Certificate Signing Parameters - templateData = map[string]interface{}{ - "common_name": certificate.Subject.CommonName, - "alt_names": makeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), - "ip_sans": makeIpAddressCommaSeparatedString(certificate.IPAddresses), - "uri_sans": makeUriCommaSeparatedString(certificate.URIs), - // other_sans (string: "") - Specifies custom OID/UTF8-string SANs. These must match values specified on the role in allowed_other_sans (see role creation for allowed_other_sans globbing rules). The format is the same as OpenSSL: ;: where the only current valid type is UTF8. This can be a comma-delimited list or a JSON string slice. 
- // Punting on Other_SANs, shouldn't really be on CAs - "signature_bits": findSignatureBits(certificate.SignatureAlgorithm), - "exclude_cn_from_sans": determineExcludeCnFromSans(certificate), - "ou": certificate.Subject.OrganizationalUnit, - "organization": certificate.Subject.Organization, - "country": certificate.Subject.Country, - "locality": certificate.Subject.Locality, - "province": certificate.Subject.Province, - "street_address": certificate.Subject.StreetAddress, - "postal_code": certificate.Subject.PostalCode, - "serial_number": certificate.Subject.SerialNumber, - "ttl": (certificate.NotAfter.Sub(certificate.NotBefore)).String(), - "max_path_length": certificate.MaxPathLen, - "permitted_dns_domains": strings.Join(certificate.PermittedDNSDomains, ","), - "use_pss": isPSS(certificate.SignatureAlgorithm), - } - - if useExistingKey { - templateData["skid"] = hex.EncodeToString(certificate.SubjectKeyId) // TODO: Double Check this with someone - if keyRef == "" { - return nil, fmt.Errorf("unable to create certificate template for existing key without a key_id") - } - templateData["key_ref"] = keyRef - } else { - templateData["key_type"] = getKeyType(certificate.PublicKeyAlgorithm.String()) - templateData["key_bits"] = findBitLength(certificate.PublicKey) - } - - return templateData, nil -} - -func isPSS(algorithm x509.SignatureAlgorithm) bool { - switch algorithm { - case x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS, x509.SHA256WithRSAPSS: - return true - default: - return false - } -} - -func makeAltNamesCommaSeparatedString(names []string, emails []string) string { - return strings.Join(names, ",") + "," + strings.Join(emails, ",") -} - -func makeUriCommaSeparatedString(uris []*url.URL) string { - stringAddresses := make([]string, len(uris)) - for i, uri := range uris { - stringAddresses[i] = uri.String() - } - return strings.Join(stringAddresses, ",") -} - -func makeIpAddressCommaSeparatedString(addresses []net.IP) string { - stringAddresses := 
make([]string, len(addresses)) - for i, address := range addresses { - stringAddresses[i] = address.String() - } - return strings.Join(stringAddresses, ",") -} - -func determineExcludeCnFromSans(certificate x509.Certificate) bool { - cn := certificate.Subject.CommonName - if cn == "" { - return false - } - - emails := certificate.EmailAddresses - for _, email := range emails { - if email == cn { - return false - } - } - - dnses := certificate.DNSNames - for _, dns := range dnses { - if dns == cn { - return false - } - } - - return true -} - -func findBitLength(publicKey any) int { - if publicKey == nil { - return 0 - } - switch pub := publicKey.(type) { - case *rsa.PublicKey: - return pub.N.BitLen() - case *ecdsa.PublicKey: - switch pub.Curve { - case elliptic.P224(): - return 224 - case elliptic.P256(): - return 256 - case elliptic.P384(): - return 384 - case elliptic.P521(): - return 521 - default: - return 0 - } - default: - return 0 - } -} - -func findSignatureBits(algo x509.SignatureAlgorithm) int { - switch algo { - case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.DSAWithSHA1, x509.ECDSAWithSHA1: - return -1 - case x509.SHA256WithRSA, x509.DSAWithSHA256, x509.ECDSAWithSHA256, x509.SHA256WithRSAPSS: - return 256 - case x509.SHA384WithRSA, x509.ECDSAWithSHA384, x509.SHA384WithRSAPSS: - return 384 - case x509.SHA512WithRSA, x509.SHA512WithRSAPSS, x509.ECDSAWithSHA512: - return 512 - case x509.PureEd25519: - return 0 - default: - return -1 - } -} - -func getKeyType(goKeyType string) string { - switch goKeyType { - case "RSA": - return "rsa" - case "ECDSA": - return "ec" - case "Ed25519": - return "ed25519" - default: - return "" - } -} diff --git a/command/pki_reissue_intermediate_test.go b/command/pki_reissue_intermediate_test.go deleted file mode 100644 index e485f04d322b2..0000000000000 --- a/command/pki_reissue_intermediate_test.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "bytes" - "testing" - - "github.com/hashicorp/vault/api" -) - -// TestPKIReIssueIntermediate tests that the pki reissue command line tool accurately copies information from the -// template certificate to the newly issued certificate, by issuing and reissuing several certificates and seeing how -// they related to each other. -func TestPKIReIssueIntermediate(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - // Relationship Map to Create - // pki-root | pki-newroot | pki-empty - // RootX1 RootX2 RootX4 RootX3 - // | | - // ---------------------------------------------- - // v v - // IntX1 IntX2 pki-int - // | | - // v v - // IntX3 (-----------------------) IntX3 - // - // Here X1,X2 have the same name (same mount) - // RootX4 uses the same key as RootX1 (but a different common_name/subject) - // RootX3 has the same name, and is on a different mount - // RootX1 has issued IntX1; RootX3 has issued IntX2 - createComplicatedIssuerSetUpWithReIssueIntermediate(t, client) - - runPkiVerifySignTests(t, client) - - runPkiListIntermediateTests(t, client) -} - -func createComplicatedIssuerSetUpWithReIssueIntermediate(t *testing.T, client *api.Client) { - // Relationship Map to Create - // pki-root | pki-newroot | pki-empty - // RootX1 RootX2 RootX4 RootX3 - // | | - // ---------------------------------------------- - // v v - // IntX1 IntX2 pki-int - // | | - // v v - // IntX3 (-----------------------) IntX3 - // - // Here X1,X2 have the same name (same mount) - // RootX4 uses the same key as RootX1 (but a different common_name/subject) - // RootX3 has the same name, and is on a different mount - // RootX1 has issued IntX1; RootX3 has issued IntX2 - - if err := client.Sys().Mount("pki-root", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if err := 
client.Sys().Mount("pki-newroot", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if err := client.Sys().Mount("pki-int", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - // Used to check handling empty list responses: Not Used for Any Issuers / Certificates - if err := client.Sys().Mount("pki-empty", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{}, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X", - "ttl": "3650d", - "issuer_name": "rootX1", - "key_name": "rootX1", - }) - if err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X", - "ttl": "3650d", - "issuer_name": "rootX2", - }) - if err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - if resp, err := client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X", - "ttl": "3650d", - "issuer_name": "rootX3", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ - "common_name": "Root X4", - "ttl": "3650d", - "issuer_name": "rootX4", - "key_ref": "rootX1", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - stdout := bytes.NewBuffer(nil) - stderr := bytes.NewBuffer(nil) - runOpts := &RunOptions{ - Stdout: stdout, - Stderr: stderr, - Client: client, - } - - // Intermediate X1 - intX1CallArgs := []string{ - "pki", 
"issue", "-format=json", "-issuer_name=intX1", - "pki-root/issuer/rootX1", - "pki-int/", - "key_type=rsa", - "common_name=Int X1", - "ou=thing", - "ttl=3650d", - } - codeOut := RunCustom(intX1CallArgs, runOpts) - if codeOut != 0 { - t.Fatalf("error issuing intermediate X1, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) - } - - // Intermediate X2 - using ReIssue - intX2CallArgs := []string{ - "pki", "reissue", "-format=json", "-issuer_name=intX2", - "pki-newroot/issuer/rootX3", - "pki-int/issuer/intX1", - "pki-int/", - "key_type=ec", - "common_name=Int X2", - } - codeOut = RunCustom(intX2CallArgs, runOpts) - if codeOut != 0 { - t.Fatalf("error issuing intermediate X2, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) - } - - // Intermediate X3 - intX3OriginalCallArgs := []string{ - "pki", "issue", "-format=json", "-issuer_name=intX3", - "pki-int/issuer/intX1", - "pki-int/", - "key_type=ec", - "use_pss=true", // This is meaningful because rootX1 is an RSA key - "signature_bits=512", - "common_name=Int X3", - "ttl=3650d", - } - codeOut = RunCustom(intX3OriginalCallArgs, runOpts) - if codeOut != 0 { - t.Fatalf("error issuing intermediate X3, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) - } - - intX3AdaptedCallArgs := []string{ - "pki", "reissue", "-format=json", "-issuer_name=intX3also", "-type=existing", - "pki-int/issuer/intX2", // This is a EC key - "pki-int/issuer/intX3", // This template includes use_pss = true which can't be accomodated - "pki-int/", - } - codeOut = RunCustom(intX3AdaptedCallArgs, runOpts) - if codeOut != 0 { - t.Fatalf("error issuing intermediate X3also, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) - } -} diff --git a/command/pki_verify_sign.go b/command/pki_verify_sign.go deleted file mode 100644 index b5a864f12c9ff..0000000000000 --- a/command/pki_verify_sign.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "bytes" - "crypto/x509" - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/vault/command/healthcheck" - - "github.com/ghodss/yaml" - "github.com/hashicorp/vault/api" - "github.com/ryanuber/columnize" -) - -type PKIVerifySignCommand struct { - *BaseCommand - - flagConfig string - flagReturnIndicator string - flagDefaultDisabled bool - flagList bool -} - -func (c *PKIVerifySignCommand) Synopsis() string { - return "Check whether one certificate validates another specified certificate" -} - -func (c *PKIVerifySignCommand) Help() string { - helpText := ` -Usage: vault pki verify-sign POSSIBLE-ISSUER POSSIBLE-ISSUED - - Verifies whether the listed issuer has signed the listed issued certificate. - - POSSIBLE-ISSUER and POSSIBLE-ISSUED are the fully name-spaced path to - an issuer certificate, for instance: 'ns1/mount1/issuer/issuerName/json'. - - Returns five fields of information: - - - signature_match: was the key of the issuer used to sign the issued. - - path_match: the possible issuer appears in the valid certificate chain - of the issued. - - key_id_match: does the key-id of the issuer match the key_id of the - subject. - - subject_match: does the subject name of the issuer match the issuer - subject of the issued. - - trust_match: if someone trusted the parent issuer, is the chain - provided sufficient to trust the child issued. 
- -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *PKIVerifySignCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - return set -} - -func (c *PKIVerifySignCommand) Run(args []string) int { - f := c.Flags() - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - - if len(args) < 2 { - if len(args) == 0 { - c.UI.Error("Not enough arguments (expected potential issuer and issued, got nothing)") - } else { - c.UI.Error("Not enough arguments (expected both potential issuer and issued, got only one)") - } - return 1 - } else if len(args) > 2 { - c.UI.Error(fmt.Sprintf("Too many arguments (expected only potential issuer and issued, got %d arguments)", len(args))) - for _, arg := range args { - if strings.HasPrefix(arg, "-") { - c.UI.Warn(fmt.Sprintf("Options (%v) must be specified before positional arguments (%v)", arg, args[0])) - break - } - } - return 1 - } - - issuer := sanitizePath(args[0]) - issued := sanitizePath(args[1]) - - client, err := c.Client() - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to obtain client: %s", err)) - return 1 - } - - issuerResp, err := readIssuer(client, issuer) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to read issuer: %s: %s", issuer, err.Error())) - return 1 - } - - results, err := verifySignBetween(client, issuerResp, issued) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to run verification: %v", err)) - return pkiRetUsage - } - - c.outputResults(results, issuer, issued) - - return 0 -} - -func verifySignBetween(client *api.Client, issuerResp *issuerResponse, issuedPath string) (map[string]bool, error) { - // Note that this eats warnings - - issuerCert := issuerResp.certificate - issuerKeyId := issuerCert.SubjectKeyId - - // Fetch and Parse the Potential Issued Cert - issuedCertBundle, err := readIssuer(client, issuedPath) - if err != nil { - return nil, fmt.Errorf("error: unable to fetch issuer %v: %w", 
issuedPath, err) - } - parentKeyId := issuedCertBundle.certificate.AuthorityKeyId - - // Check the Chain-Match - rootCertPool := x509.NewCertPool() - rootCertPool.AddCert(issuerCert) - checkTrustPathOptions := x509.VerifyOptions{ - Roots: rootCertPool, - } - trust := false - trusts, err := issuedCertBundle.certificate.Verify(checkTrustPathOptions) - if err != nil && !strings.Contains(err.Error(), "certificate signed by unknown authority") { - return nil, err - } else if err == nil { - for _, chain := range trusts { - // Output of this Should Only Have One Trust with Chain of Length Two (Child followed by Parent) - for _, cert := range chain { - if issuedCertBundle.certificate.Equal(cert) { - trust = true - break - } - } - } - } - - pathMatch := false - for _, cert := range issuedCertBundle.caChain { - if bytes.Equal(cert.Raw, issuerCert.Raw) { - pathMatch = true - break - } - } - - signatureMatch := false - err = issuedCertBundle.certificate.CheckSignatureFrom(issuerCert) - if err == nil { - signatureMatch = true - } - - result := map[string]bool{ - // This comparison isn't strictly correct, despite a standard ordering these are sets - "subject_match": bytes.Equal(issuerCert.RawSubject, issuedCertBundle.certificate.RawIssuer), - "path_match": pathMatch, - "trust_match": trust, // TODO: Refactor into a reasonable function - "key_id_match": bytes.Equal(parentKeyId, issuerKeyId), - "signature_match": signatureMatch, - } - - return result, nil -} - -type issuerResponse struct { - keyId string - certificate *x509.Certificate - caChain []*x509.Certificate -} - -func readIssuer(client *api.Client, issuerPath string) (*issuerResponse, error) { - issuerResp, err := client.Logical().Read(issuerPath) - if err != nil { - return nil, err - } - issuerCertPem, err := requireStrRespField(issuerResp, "certificate") - if err != nil { - return nil, err - } - issuerCert, err := healthcheck.ParsePEMCert(issuerCertPem) - if err != nil { - return nil, fmt.Errorf("unable to parse issuer 
%v's certificate: %w", issuerPath, err) - } - - caChainPem, err := requireStrListRespField(issuerResp, "ca_chain") - if err != nil { - return nil, fmt.Errorf("unable to parse issuer %v's CA chain: %w", issuerPath, err) - } - - var caChain []*x509.Certificate - for _, pem := range caChainPem { - trimmedPem := strings.TrimSpace(pem) - if trimmedPem == "" { - continue - } - cert, err := healthcheck.ParsePEMCert(trimmedPem) - if err != nil { - return nil, err - } - caChain = append(caChain, cert) - } - - keyId := optStrRespField(issuerResp, "key_id") - - return &issuerResponse{ - keyId: keyId, - certificate: issuerCert, - caChain: caChain, - }, nil -} - -func optStrRespField(resp *api.Secret, reqField string) string { - if resp == nil || resp.Data == nil { - return "" - } - if val, present := resp.Data[reqField]; !present { - return "" - } else if strVal, castOk := val.(string); !castOk || strVal == "" { - return "" - } else { - return strVal - } -} - -func requireStrRespField(resp *api.Secret, reqField string) (string, error) { - if resp == nil || resp.Data == nil { - return "", fmt.Errorf("nil response received, %s field unavailable", reqField) - } - if val, present := resp.Data[reqField]; !present { - return "", fmt.Errorf("response did not contain field: %s", reqField) - } else if strVal, castOk := val.(string); !castOk || strVal == "" { - return "", fmt.Errorf("field %s value was blank or not a string: %v", reqField, val) - } else { - return strVal, nil - } -} - -func requireStrListRespField(resp *api.Secret, reqField string) ([]string, error) { - if resp == nil || resp.Data == nil { - return nil, fmt.Errorf("nil response received, %s field unavailable", reqField) - } - if val, present := resp.Data[reqField]; !present { - return nil, fmt.Errorf("response did not contain field: %s", reqField) - } else { - return healthcheck.StringList(val) - } -} - -func (c *PKIVerifySignCommand) outputResults(results map[string]bool, potentialParent, potentialChild string) error { 
- switch Format(c.UI) { - case "", "table": - return c.outputResultsTable(results, potentialParent, potentialChild) - case "json": - return c.outputResultsJSON(results) - case "yaml": - return c.outputResultsYAML(results) - default: - return fmt.Errorf("unknown output format: %v", Format(c.UI)) - } -} - -func (c *PKIVerifySignCommand) outputResultsTable(results map[string]bool, potentialParent, potentialChild string) error { - c.UI.Output("issuer:" + potentialParent) - c.UI.Output("issued:" + potentialChild + "\n") - data := []string{"field" + hopeDelim + "value"} - for field, finding := range results { - row := field + hopeDelim + strconv.FormatBool(finding) - data = append(data, row) - } - c.UI.Output(tableOutput(data, &columnize.Config{ - Delim: hopeDelim, - })) - c.UI.Output("\n") - - return nil -} - -func (c *PKIVerifySignCommand) outputResultsJSON(results map[string]bool) error { - bytes, err := json.MarshalIndent(results, "", " ") - if err != nil { - return err - } - - c.UI.Output(string(bytes)) - return nil -} - -func (c *PKIVerifySignCommand) outputResultsYAML(results map[string]bool) error { - bytes, err := yaml.Marshal(results) - if err != nil { - return err - } - - c.UI.Output(string(bytes)) - return nil -} diff --git a/command/pki_verify_sign_test.go b/command/pki_verify_sign_test.go deleted file mode 100644 index 3f8986a5b6b16..0000000000000 --- a/command/pki_verify_sign_test.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "strings" - "testing" - - "github.com/hashicorp/vault/api" -) - -func TestPKIVerifySign(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - // Relationship Map to Create - // pki-root | pki-newroot | pki-empty - // RootX1 RootX2 RootX4 RootX3 - // | | - // ---------------------------------------------- - // v v - // IntX1 IntX2 pki-int - // | | - // v v - // IntX3 (-----------------------) IntX3 - // - // Here X1,X2 have the same name (same mount) - // RootX4 uses the same key as RootX1 (but a different common_name/subject) - // RootX3 has the same name, and is on a different mount - // RootX1 has issued IntX1; RootX3 has issued IntX2 - createComplicatedIssuerSetUp(t, client) - - runPkiVerifySignTests(t, client) -} - -func runPkiVerifySignTests(t *testing.T, client *api.Client) { - cases := []struct { - name string - args []string - expectedMatches map[string]bool - jsonOut bool - shouldError bool - expectErrorCont string - expectErrorNotCont string - nonJsonOutputCont string - }{ - { - "rootX1-matches-rootX1", - []string{"pki", "verify-sign", "-format=json", "pki-root/issuer/rootX1", "pki-root/issuer/rootX1"}, - map[string]bool{ - "key_id_match": true, - "path_match": true, - "signature_match": true, - "subject_match": true, - "trust_match": true, - }, - true, - false, - "", - "", - "", - }, - { - "rootX1-on-rootX2-onlySameName", - []string{"pki", "verify-sign", "-format=json", "pki-root/issuer/rootX1", "pki-root/issuer/rootX2"}, - map[string]bool{ - "key_id_match": false, - "path_match": false, - "signature_match": false, - "subject_match": true, - "trust_match": false, - }, - true, - false, - "", - "", - "", - }, - } - for _, testCase := range cases { - var errString string - var results map[string]interface{} - var stdOut string - - if testCase.jsonOut { - results, errString = execPKIVerifyJson(t, client, 
false, testCase.shouldError, testCase.args) - } else { - stdOut, errString = execPKIVerifyNonJson(t, client, testCase.shouldError, testCase.args) - } - - // Verify Error Behavior - if testCase.shouldError { - if errString == "" { - t.Fatalf("Expected error in Testcase %s : no error produced, got results %s", testCase.name, results) - } - if testCase.expectErrorCont != "" && !strings.Contains(errString, testCase.expectErrorCont) { - t.Fatalf("Expected error in Testcase %s to contain %s, but got error %s", testCase.name, testCase.expectErrorCont, errString) - } - if testCase.expectErrorNotCont != "" && strings.Contains(errString, testCase.expectErrorNotCont) { - t.Fatalf("Expected error in Testcase %s to not contain %s, but got error %s", testCase.name, testCase.expectErrorNotCont, errString) - } - } else { - if errString != "" { - t.Fatalf("Error in Testcase %s : no error expected, but got error: %s", testCase.name, errString) - } - } - - // Verify Output - if testCase.jsonOut { - isMatch, errString := verifyExpectedJson(testCase.expectedMatches, results) - if !isMatch { - t.Fatalf("Expected Results for Testcase %s, do not match returned results %s", testCase.name, errString) - } - } else { - if !strings.Contains(stdOut, testCase.nonJsonOutputCont) { - t.Fatalf("Expected standard output for Testcase %s to contain %s, but got %s", testCase.name, testCase.nonJsonOutputCont, stdOut) - } - } - - } -} - -func execPKIVerifyJson(t *testing.T, client *api.Client, expectErrorUnmarshalling bool, expectErrorOut bool, callArgs []string) (map[string]interface{}, string) { - stdout, stderr := execPKIVerifyNonJson(t, client, expectErrorOut, callArgs) - - var results map[string]interface{} - if err := json.Unmarshal([]byte(stdout), &results); err != nil && !expectErrorUnmarshalling { - t.Fatalf("failed to decode json response : %v \n json: \n%v", err, stdout) - } - - return results, stderr -} - -func execPKIVerifyNonJson(t *testing.T, client *api.Client, expectErrorOut bool, 
callArgs []string) (string, string) { - stdout := bytes.NewBuffer(nil) - stderr := bytes.NewBuffer(nil) - runOpts := &RunOptions{ - Stdout: stdout, - Stderr: stderr, - Client: client, - } - - code := RunCustom(callArgs, runOpts) - if !expectErrorOut && code != 0 { - t.Fatalf("running command `%v` unsuccessful (ret %v)\nerr: %v", strings.Join(callArgs, " "), code, stderr.String()) - } - - t.Log(stdout.String() + stderr.String()) - - return stdout.String(), stderr.String() -} - -func convertListOfInterfaceToString(list []interface{}, sep string) string { - newList := make([]string, len(list)) - for i, interfa := range list { - newList[i] = interfa.(string) - } - return strings.Join(newList, sep) -} - -func createComplicatedIssuerSetUp(t *testing.T, client *api.Client) { - // Relationship Map to Create - // pki-root | pki-newroot | pki-empty - // RootX1 RootX2 RootX4 RootX3 - // | | - // ---------------------------------------------- - // v v - // IntX1 IntX2 pki-int - // | | - // v v - // IntX3 (-----------------------) IntX3 - // - // Here X1,X2 have the same name (same mount) - // RootX4 uses the same key as RootX1 (but a different common_name/subject) - // RootX3 has the same name, and is on a different mount - // RootX1 has issued IntX1; RootX3 has issued IntX2 - - if err := client.Sys().Mount("pki-root", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if err := client.Sys().Mount("pki-newroot", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - if err := client.Sys().Mount("pki-int", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - MaxLeaseTTL: "36500d", - }, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - // Used to check handling empty list responses: Not Used for Any Issuers / Certificates - if err := 
client.Sys().Mount("pki-empty", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{}, - }); err != nil { - t.Fatalf("pki mount error: %#v", err) - } - - resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X", - "ttl": "3650d", - "issuer_name": "rootX1", - "key_name": "rootX1", - }) - if err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X", - "ttl": "3650d", - "issuer_name": "rootX2", - }) - if err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - if resp, err := client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Root X", - "ttl": "3650d", - "issuer_name": "rootX3", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ - "common_name": "Root X4", - "ttl": "3650d", - "issuer_name": "rootX4", - "key_ref": "rootX1", - }); err != nil || resp == nil { - t.Fatalf("failed to prime CA: %v", err) - } - - // Intermediate X1 - int1CsrResp, err := client.Logical().Write("pki-int/intermediate/generate/internal", map[string]interface{}{ - "key_type": "rsa", - "common_name": "Int X1", - "ttl": "3650d", - }) - if err != nil || int1CsrResp == nil { - t.Fatalf("failed to generate CSR: %v", err) - } - int1KeyId, ok := int1CsrResp.Data["key_id"] - if !ok { - t.Fatalf("no key_id produced when generating csr, response %v", int1CsrResp.Data) - } - int1CsrRaw, ok := int1CsrResp.Data["csr"] - if !ok { - t.Fatalf("no csr produced when generating intermediate, resp: %v", int1CsrResp) - } - int1Csr := int1CsrRaw.(string) - int1CertResp, err := client.Logical().Write("pki-root/issuer/rootX1/sign-intermediate", 
map[string]interface{}{ - "csr": int1Csr, - }) - if err != nil || int1CertResp == nil { - t.Fatalf("failed to sign CSR: %v", err) - } - int1CertChainRaw, ok := int1CertResp.Data["ca_chain"] - if !ok { - t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int1CertResp) - } - int1CertChain := convertListOfInterfaceToString(int1CertChainRaw.([]interface{}), "\n") - importInt1Resp, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{ - "pem_bundle": int1CertChain, - }) - if err != nil || importInt1Resp == nil { - t.Fatalf("failed to import certificate: %v", err) - } - importIssuerIdMap, ok := importInt1Resp.Data["mapping"] - if !ok { - t.Fatalf("no mapping data returned on issuer import: %v", importInt1Resp) - } - for key, value := range importIssuerIdMap.(map[string]interface{}) { - if value != nil && len(value.(string)) > 0 { - if value != int1KeyId { - t.Fatalf("Expected exactly one key_match to %v, got multiple: %v", int1KeyId, importIssuerIdMap) - } - if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ - "issuer_name": "intX1", - }); err != nil || resp == nil { - t.Fatalf("error naming issuer %v", err) - } - } else { - if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ - "issuer_name": "rootX1", - }); err != nil || resp == nil { - t.Fatalf("error naming issuer parent %v", err) - } - } - } - - // Intermediate X2 - int2CsrResp, err := client.Logical().Write("pki-int/intermediate/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "Int X2", - "ttl": "3650d", - }) - if err != nil || int2CsrResp == nil { - t.Fatalf("failed to generate CSR: %v", err) - } - int2KeyId, ok := int2CsrResp.Data["key_id"] - if !ok { - t.Fatalf("no key material returned from producing csr, resp: %v", int2CsrResp) - } - int2CsrRaw, ok := int2CsrResp.Data["csr"] - if !ok { - t.Fatalf("no 
csr produced when generating intermediate, resp: %v", int2CsrResp) - } - int2Csr := int2CsrRaw.(string) - int2CertResp, err := client.Logical().Write("pki-newroot/issuer/rootX3/sign-intermediate", map[string]interface{}{ - "csr": int2Csr, - }) - if err != nil || int2CertResp == nil { - t.Fatalf("failed to sign CSR: %v", err) - } - int2CertChainRaw, ok := int2CertResp.Data["ca_chain"] - if !ok { - t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int2CertResp) - } - int2CertChain := convertListOfInterfaceToString(int2CertChainRaw.([]interface{}), "\n") - importInt2Resp, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{ - "pem_bundle": int2CertChain, - }) - if err != nil || importInt2Resp == nil { - t.Fatalf("failed to import certificate: %v", err) - } - importIssuer2IdMap, ok := importInt2Resp.Data["mapping"] - if !ok { - t.Fatalf("no mapping data returned on issuer import: %v", importInt2Resp) - } - for key, value := range importIssuer2IdMap.(map[string]interface{}) { - if value != nil && len(value.(string)) > 0 { - if value != int2KeyId { - t.Fatalf("unexpected key_match with ca_chain, expected only %v, got %v", int2KeyId, importIssuer2IdMap) - } - if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ - "issuer_name": "intX2", - }); err != nil || resp == nil { - t.Fatalf("error naming issuer %v", err) - } - } else { - if resp, err := client.Logical().Write("pki-int/issuer/"+key, map[string]interface{}{ - "issuer_name": "rootX3", - }); err != nil || resp == nil { - t.Fatalf("error naming parent issuer %v", err) - } - } - } - - // Intermediate X3 - int3CsrResp, err := client.Logical().Write("pki-int/intermediate/generate/internal", map[string]interface{}{ - "key_type": "rsa", - "common_name": "Int X3", - "ttl": "3650d", - }) - if err != nil || int3CsrResp == nil { - t.Fatalf("failed to generate CSR: %v", err) - } - int3KeyId, ok := 
int3CsrResp.Data["key_id"] - int3CsrRaw, ok := int3CsrResp.Data["csr"] - if !ok { - t.Fatalf("no csr produced when generating intermediate, resp: %v", int3CsrResp) - } - int3Csr := int3CsrRaw.(string) - // sign by intX1 and import - int3CertResp1, err := client.Logical().Write("pki-int/issuer/intX1/sign-intermediate", map[string]interface{}{ - "csr": int3Csr, - }) - if err != nil || int3CertResp1 == nil { - t.Fatalf("failed to sign CSR: %v", err) - } - int3CertChainRaw1, ok := int3CertResp1.Data["ca_chain"] - if !ok { - t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int3CertResp1) - } - int3CertChain1 := convertListOfInterfaceToString(int3CertChainRaw1.([]interface{}), "\n") - importInt3Resp1, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{ - "pem_bundle": int3CertChain1, - }) - if err != nil || importInt3Resp1 == nil { - t.Fatalf("failed to import certificate: %v", err) - } - importIssuer3IdMap1, ok := importInt3Resp1.Data["mapping"] - if !ok { - t.Fatalf("no mapping data returned on issuer import: %v", importInt2Resp) - } - for key, value := range importIssuer3IdMap1.(map[string]interface{}) { - if value != nil && len(value.(string)) > 0 && value == int3KeyId { - if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ - "issuer_name": "intX3", - }); err != nil || resp == nil { - t.Fatalf("error naming issuer %v", err) - } - break - } - } - - // sign by intX2 and import - int3CertResp2, err := client.Logical().Write("pki-int/issuer/intX2/sign-intermediate", map[string]interface{}{ - "csr": int3Csr, - }) - if err != nil || int3CertResp2 == nil { - t.Fatalf("failed to sign CSR: %v", err) - } - int3CertChainRaw2, ok := int3CertResp2.Data["ca_chain"] - if !ok { - t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int3CertResp2) - } - int3CertChain2 := convertListOfInterfaceToString(int3CertChainRaw2.([]interface{}), "\n") - 
importInt3Resp2, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{ - "pem_bundle": int3CertChain2, - }) - if err != nil || importInt3Resp2 == nil { - t.Fatalf("failed to import certificate: %v", err) - } - importIssuer3IdMap2, ok := importInt3Resp2.Data["mapping"] - if !ok { - t.Fatalf("no mapping data returned on issuer import: %v", importInt2Resp) - } - for key, value := range importIssuer3IdMap2.(map[string]interface{}) { - if value != nil && len(value.(string)) > 0 && value == int3KeyId { - if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ - "issuer_name": "intX3also", - }); err != nil || resp == nil { - t.Fatalf("error naming issuer %v", err) - } - break // Parent Certs Already Named - } - } -} - -func verifyExpectedJson(expectedResults map[string]bool, results map[string]interface{}) (isMatch bool, error string) { - if len(expectedResults) != len(results) { - return false, fmt.Sprintf("Different Number of Keys in Expected Results (%d), than results (%d)", - len(expectedResults), len(results)) - } - for key, value := range expectedResults { - if results[key].(bool) != value { - return false, fmt.Sprintf("Different value for key %s : expected %t got %s", key, value, results[key]) - } - } - return true, "" -} diff --git a/command/plugin.go b/command/plugin.go index ca55a4bf73172..cf0a5009f6260 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/plugin_deregister.go b/command/plugin_deregister.go index 86b329f1063d1..a65bf6702a96d 100644 --- a/command/plugin_deregister.go +++ b/command/plugin_deregister.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -9,6 +6,7 @@ import ( semver "github.com/hashicorp/go-version" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -66,7 +64,7 @@ func (c *PluginDeregisterCommand) Flags() *FlagSets { } func (c *PluginDeregisterCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultPlugins(api.PluginTypeUnknown) + return c.PredictVaultPlugins(consts.PluginTypeUnknown) } func (c *PluginDeregisterCommand) AutocompleteFlags() complete.Flags { @@ -104,7 +102,7 @@ func (c *PluginDeregisterCommand) Run(args []string) int { return 2 } - pluginType, err := api.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) + pluginType, err := consts.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) if err != nil { c.UI.Error(err.Error()) return 2 diff --git a/command/plugin_deregister_test.go b/command/plugin_deregister_test.go index b05644e46d4fe..fc3bc5801ebc5 100644 --- a/command/plugin_deregister_test.go +++ b/command/plugin_deregister_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -8,8 +5,8 @@ import ( "testing" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -80,21 +77,21 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, consts.PluginTypeCredential, "") ui, cmd := testPluginDeregisterCommand(t) cmd.client = client if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ Name: pluginName, - Type: api.PluginTypeCredential, + Type: consts.PluginTypeCredential, Command: pluginName, SHA256: sha256Sum, }); err != nil { @@ -116,7 +113,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { } resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: api.PluginTypeCredential, + Type: consts.PluginTypeCredential, }) if err != nil { t.Fatal(err) @@ -138,14 +135,14 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration with version", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, api.PluginTypeCredential) + _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, consts.PluginTypeCredential) 
ui, cmd := testPluginDeregisterCommand(t) cmd.client = client @@ -166,7 +163,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { } resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: api.PluginTypeUnknown, + Type: consts.PluginTypeUnknown, }) if err != nil { t.Fatal(err) @@ -186,14 +183,14 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration with missing version", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, api.PluginTypeCredential) + testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, consts.PluginTypeCredential) ui, cmd := testPluginDeregisterCommand(t) cmd.client = client @@ -213,7 +210,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { } resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: api.PluginTypeUnknown, + Type: consts.PluginTypeUnknown, }) if err != nil { t.Fatal(err) diff --git a/command/plugin_info.go b/command/plugin_info.go index 1fa9555ba9c27..8fedb98315358 100644 --- a/command/plugin_info.go +++ b/command/plugin_info.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -8,6 +5,7 @@ import ( "strings" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -60,7 +58,7 @@ func (c *PluginInfoCommand) Flags() *FlagSets { } func (c *PluginInfoCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultPlugins(api.PluginTypeUnknown) + return c.PredictVaultPlugins(consts.PluginTypeUnknown) } func (c *PluginInfoCommand) AutocompleteFlags() complete.Flags { @@ -100,7 +98,7 @@ func (c *PluginInfoCommand) Run(args []string) int { return 2 } - pluginType, err := api.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) + pluginType, err := consts.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) if err != nil { c.UI.Error(err.Error()) return 2 diff --git a/command/plugin_info_test.go b/command/plugin_info_test.go index 921014cec538a..714ac1e59b812 100644 --- a/command/plugin_info_test.go +++ b/command/plugin_info_test.go @@ -1,16 +1,12 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( "strings" "testing" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/versions" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -42,7 +38,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { }, { "no_plugin_exist", - []string{api.PluginTypeCredential.String(), "not-a-real-plugin-like-ever"}, + []string{consts.PluginTypeCredential.String(), "not-a-real-plugin-like-ever"}, "Error reading plugin", 2, }, @@ -79,20 +75,20 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("default", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, consts.PluginTypeCredential, "") ui, cmd := testPluginInfoCommand(t) cmd.client = client code := cmd.Run([]string{ - api.PluginTypeCredential.String(), pluginName, + consts.PluginTypeCredential.String(), pluginName, }) if exp := 0; code != exp { t.Errorf("expected %d to be %d", code, exp) @@ -110,14 +106,14 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("version flag", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() const pluginName = "azure" - _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "v1.0.0") + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, 
consts.PluginTypeCredential, "v1.0.0") for name, tc := range map[string]struct { version string @@ -132,7 +128,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { code := cmd.Run([]string{ "-version=" + tc.version, - api.PluginTypeCredential.String(), pluginName, + consts.PluginTypeCredential.String(), pluginName, }) combined := ui.OutputWriter.String() + ui.ErrorWriter.String() @@ -156,21 +152,21 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("field", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") + testPluginCreateAndRegister(t, client, pluginDir, pluginName, consts.PluginTypeCredential, "") ui, cmd := testPluginInfoCommand(t) cmd.client = client code := cmd.Run([]string{ "-field", "builtin", - api.PluginTypeCredential.String(), pluginName, + consts.PluginTypeCredential.String(), pluginName, }) if exp := 0; code != exp { t.Errorf("expected %d to be %d", code, exp) @@ -192,7 +188,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { cmd.client = client code := cmd.Run([]string{ - api.PluginTypeCredential.String(), "my-plugin", + consts.PluginTypeCredential.String(), "my-plugin", }) if exp := 2; code != exp { t.Errorf("expected %d to be %d", code, exp) diff --git a/command/plugin_list.go b/command/plugin_list.go index f1b0e5ebfa04b..641c5e2bae982 100644 --- a/command/plugin_list.go +++ b/command/plugin_list.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -8,6 +5,7 @@ import ( "strings" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -92,12 +90,12 @@ func (c *PluginListCommand) Run(args []string) int { return 1 } - pluginType := api.PluginTypeUnknown + pluginType := consts.PluginTypeUnknown if len(args) > 0 { pluginTypeStr := strings.TrimSpace(args[0]) if pluginTypeStr != "" { var err error - pluginType, err = api.ParsePluginType(pluginTypeStr) + pluginType, err = consts.ParsePluginType(pluginTypeStr) if err != nil { c.UI.Error(fmt.Sprintf("Error parsing type: %s", err)) return 2 @@ -141,10 +139,10 @@ func (c *PluginListCommand) Run(args []string) int { } } -func (c *PluginListCommand) simpleResponse(plugins *api.ListPluginsResponse, pluginType api.PluginType) []string { +func (c *PluginListCommand) simpleResponse(plugins *api.ListPluginsResponse, pluginType consts.PluginType) []string { var out []string switch pluginType { - case api.PluginTypeUnknown: + case consts.PluginTypeUnknown: out = []string{"Name | Type | Version"} for _, plugin := range plugins.Details { out = append(out, fmt.Sprintf("%s | %s | %s", plugin.Name, plugin.Type, plugin.Version)) diff --git a/command/plugin_list_test.go b/command/plugin_list_test.go index edae76558f374..8e4bbbff83e67 100644 --- a/command/plugin_list_test.go +++ b/command/plugin_list_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/plugin_register.go b/command/plugin_register.go index e9d2e5b7c6773..0c4510e3b99b2 100644 --- a/command/plugin_register.go +++ b/command/plugin_register.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -8,6 +5,7 @@ import ( "strings" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -94,7 +92,7 @@ func (c *PluginRegisterCommand) Flags() *FlagSets { } func (c *PluginRegisterCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultPlugins(api.PluginTypeUnknown) + return c.PredictVaultPlugins(consts.PluginTypeUnknown) } func (c *PluginRegisterCommand) AutocompleteFlags() complete.Flags { @@ -137,7 +135,7 @@ func (c *PluginRegisterCommand) Run(args []string) int { return 2 } - pluginType, err := api.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) + pluginType, err := consts.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) if err != nil { c.UI.Error(err.Error()) return 2 diff --git a/command/plugin_register_test.go b/command/plugin_register_test.go index eccd5c1f00145..c2047d070f55a 100644 --- a/command/plugin_register_test.go +++ b/command/plugin_register_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -10,8 +7,8 @@ import ( "testing" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -83,7 +80,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) @@ -110,7 +107,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { } resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: api.PluginTypeCredential, + Type: consts.PluginTypeCredential, }) if err != nil { t.Fatal(err) @@ -132,7 +129,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { t.Run("integration with version", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) @@ -141,7 +138,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { const pluginName = "my-plugin" versions := []string{"v1.0.0", "v2.0.1"} _, sha256Sum := testPluginCreate(t, pluginDir, pluginName) - types := []api.PluginType{api.PluginTypeCredential, api.PluginTypeDatabase, api.PluginTypeSecrets} + types := []consts.PluginType{consts.PluginTypeCredential, consts.PluginTypeDatabase, consts.PluginTypeSecrets} for _, typ := range types { for _, version := range versions { @@ -167,17 +164,17 @@ func TestPluginRegisterCommand_Run(t *testing.T) { } resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: api.PluginTypeUnknown, + Type: consts.PluginTypeUnknown, }) if err != nil { t.Fatal(err) } - found := make(map[api.PluginType]int) - versionsFound := make(map[api.PluginType][]string) + found := 
make(map[consts.PluginType]int) + versionsFound := make(map[consts.PluginType][]string) for _, p := range resp.Details { if p.Name == pluginName { - typ, err := api.ParsePluginType(p.Type) + typ, err := consts.ParsePluginType(p.Type) if err != nil { t.Fatal(err) } diff --git a/command/plugin_reload.go b/command/plugin_reload.go index 2e95fdd1430db..ae3c663869fe5 100644 --- a/command/plugin_reload.go +++ b/command/plugin_reload.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/plugin_reload_status.go b/command/plugin_reload_status.go index e527a07d1bfba..319d539c15468 100644 --- a/command/plugin_reload_status.go +++ b/command/plugin_reload_status.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/plugin_reload_test.go b/command/plugin_reload_test.go index 646fda9245057..5713d1a1507b7 100644 --- a/command/plugin_reload_test.go +++ b/command/plugin_reload_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -8,7 +5,8 @@ import ( "testing" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -85,21 +83,21 @@ func TestPluginReloadCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, consts.PluginTypeCredential, "") ui, cmd := testPluginReloadCommand(t) cmd.client = client if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ Name: pluginName, - Type: api.PluginTypeCredential, + Type: consts.PluginTypeCredential, Command: pluginName, SHA256: sha256Sum, }); err != nil { diff --git a/command/plugin_test.go b/command/plugin_test.go index 08c350cbd0882..cc83efc772a9c 100644 --- a/command/plugin_test.go +++ b/command/plugin_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -12,6 +9,7 @@ import ( "testing" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" ) // testPluginCreate creates a sample plugin in a tempdir and returns the shasum @@ -40,7 +38,7 @@ func testPluginCreate(tb testing.TB, dir, name string) (string, string) { } // testPluginCreateAndRegister creates a plugin and registers it in the catalog. 
-func testPluginCreateAndRegister(tb testing.TB, client *api.Client, dir, name string, pluginType api.PluginType, version string) (string, string) { +func testPluginCreateAndRegister(tb testing.TB, client *api.Client, dir, name string, pluginType consts.PluginType, version string) (string, string) { tb.Helper() pth, sha256Sum := testPluginCreate(tb, dir, name) @@ -59,7 +57,7 @@ func testPluginCreateAndRegister(tb testing.TB, client *api.Client, dir, name st } // testPluginCreateAndRegisterVersioned creates a versioned plugin and registers it in the catalog. -func testPluginCreateAndRegisterVersioned(tb testing.TB, client *api.Client, dir, name string, pluginType api.PluginType) (string, string, string) { +func testPluginCreateAndRegisterVersioned(tb testing.TB, client *api.Client, dir, name string, pluginType consts.PluginType) (string, string, string) { tb.Helper() pth, sha256Sum := testPluginCreate(tb, dir, name) diff --git a/command/policy.go b/command/policy.go index 289aae134a4ff..59ffdf0bfb3d4 100644 --- a/command/policy.go +++ b/command/policy.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_delete.go b/command/policy_delete.go index 199fb74a96636..76fa9a21d0f86 100644 --- a/command/policy_delete.go +++ b/command/policy_delete.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_delete_test.go b/command/policy_delete_test.go index 008cd59766da3..2c822de9d4fdd 100644 --- a/command/policy_delete_test.go +++ b/command/policy_delete_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_fmt.go b/command/policy_fmt.go index 75a91791327c5..7912c10643d9c 100644 --- a/command/policy_fmt.go +++ b/command/policy_fmt.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_fmt_test.go b/command/policy_fmt_test.go index 89ed5215b6dad..2ae92ff6947e0 100644 --- a/command/policy_fmt_test.go +++ b/command/policy_fmt_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_list.go b/command/policy_list.go index 7b5bfc12c98e7..53e85df0fdb9b 100644 --- a/command/policy_list.go +++ b/command/policy_list.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_list_test.go b/command/policy_list_test.go index 19766978c7330..70defe54ead7a 100644 --- a/command/policy_list_test.go +++ b/command/policy_list_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_read.go b/command/policy_read.go index 4f226444bea80..31777c5d5ae9e 100644 --- a/command/policy_read.go +++ b/command/policy_read.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_read_test.go b/command/policy_read_test.go index f091749176b50..8cd7c066b8ce2 100644 --- a/command/policy_read_test.go +++ b/command/policy_read_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_write.go b/command/policy_write.go index 81ff2b3e1993c..538414bc50fa8 100644 --- a/command/policy_write.go +++ b/command/policy_write.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/policy_write_test.go b/command/policy_write_test.go index 8294ef1934dbb..c8db7dc9ddc23 100644 --- a/command/policy_write_test.go +++ b/command/policy_write_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/print.go b/command/print.go index 19ac0a674dbc3..dace6ac951d68 100644 --- a/command/print.go +++ b/command/print.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/print_token.go b/command/print_token.go index 862af23e0b2d3..efe5aeedd3efd 100644 --- a/command/print_token.go +++ b/command/print_token.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/proxy.go b/command/proxy.go deleted file mode 100644 index 0eee175af9703..0000000000000 --- a/command/proxy.go +++ /dev/null @@ -1,1116 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "context" - "crypto/tls" - "flag" - "fmt" - "io" - "net" - "net/http" - "os" - "sort" - "strings" - "sync" - "time" - - systemd "github.com/coreos/go-systemd/daemon" - ctconfig "github.com/hashicorp/consul-template/config" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-secure-stdlib/gatedwriter" - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/go-secure-stdlib/reloadutil" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - cache "github.com/hashicorp/vault/command/agentproxyshared/cache" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" - "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" - "github.com/hashicorp/vault/command/agentproxyshared/winsvc" - proxyConfig "github.com/hashicorp/vault/command/proxy/config" - "github.com/hashicorp/vault/helper/logging" - "github.com/hashicorp/vault/helper/metricsutil" - "github.com/hashicorp/vault/helper/useragent" - "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/vault/internalshared/listenerutil" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/version" - "github.com/kr/pretty" - "github.com/mitchellh/cli" - "github.com/oklog/run" - "github.com/posener/complete" - "golang.org/x/text/cases" - "golang.org/x/text/language" - "google.golang.org/grpc/test/bufconn" -) - -var ( - _ cli.Command = (*ProxyCommand)(nil) - _ cli.CommandAutocomplete = (*ProxyCommand)(nil) -) - -const ( - // flagNameProxyExitAfterAuth is used as a Proxy specific flag to indicate - // that proxy should exit after a single successful auth - flagNameProxyExitAfterAuth = "exit-after-auth" -) - -type ProxyCommand 
struct { - *BaseCommand - logFlags logFlags - - config *proxyConfig.Config - - ShutdownCh chan struct{} - SighupCh chan struct{} - - tlsReloadFuncsLock sync.RWMutex - tlsReloadFuncs []reloadutil.ReloadFunc - - logWriter io.Writer - logGate *gatedwriter.Writer - logger log.Logger - - // Telemetry object - metricsHelper *metricsutil.MetricsHelper - - cleanupGuard sync.Once - - startedCh chan struct{} // for tests - reloadedCh chan struct{} // for tests - - flagConfigs []string - flagExitAfterAuth bool - flagTestVerifyOnly bool -} - -func (c *ProxyCommand) Synopsis() string { - return "Start a Vault Proxy" -} - -func (c *ProxyCommand) Help() string { - helpText := ` -Usage: vault proxy [options] - - This command starts a Vault Proxy that can perform automatic authentication - in certain environments. - - Start a proxy with a configuration file: - - $ vault proxy -config=/etc/vault/config.hcl - - For a full list of examples, please see the documentation. - -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *ProxyCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) - - f := set.NewFlagSet("Command Options") - - // Augment with the log flags - f.addLogFlags(&c.logFlags) - - f.StringSliceVar(&StringSliceVar{ - Name: "config", - Target: &c.flagConfigs, - Completion: complete.PredictOr( - complete.PredictFiles("*.hcl"), - complete.PredictFiles("*.json"), - ), - Usage: "Path to a configuration file. This configuration file should " + - "contain only proxy directives.", - }) - - f.BoolVar(&BoolVar{ - Name: flagNameProxyExitAfterAuth, - Target: &c.flagExitAfterAuth, - Default: false, - Usage: "If set to true, the proxy will exit with code 0 after a single " + - "successful auth, where success means that a token was retrieved and " + - "all sinks successfully wrote it", - }) - - // Internal-only flags to follow. - // - // Why hello there little source code reader! Welcome to the Vault source - // code. 
The remaining options are intentionally undocumented and come with - // no warranty or backwards-compatibility promise. Do not use these flags - // in production. Do not build automation using these flags. Unless you are - // developing against Vault, you should not need any of these flags. - f.BoolVar(&BoolVar{ - Name: "test-verify-only", - Target: &c.flagTestVerifyOnly, - Default: false, - Hidden: true, - }) - - // End internal-only flags. - - return set -} - -func (c *ProxyCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictNothing -} - -func (c *ProxyCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *ProxyCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - // Create a logger. We wrap it in a gated writer so that it doesn't - // start logging too early. - c.logGate = gatedwriter.NewWriter(os.Stderr) - c.logWriter = c.logGate - - if c.logFlags.flagCombineLogs { - c.logWriter = os.Stdout - } - - // Validation - if len(c.flagConfigs) < 1 { - c.UI.Error("Must specify exactly at least one config path using -config") - return 1 - } - - config, err := c.loadConfig(c.flagConfigs) - if err != nil { - c.outputErrors(err) - return 1 - } - - if config.AutoAuth == nil { - c.UI.Info("No auto_auth block found in config, the automatic authentication feature will not be started") - } - - c.applyConfigOverrides(f, config) // This only needs to happen on start-up to aggregate config from flags and env vars - c.config = config - - l, err := c.newLogger() - if err != nil { - c.outputErrors(err) - return 1 - } - c.logger = l - - infoKeys := make([]string, 0, 10) - info := make(map[string]string) - info["log level"] = config.LogLevel - infoKeys = append(infoKeys, "log level") - - infoKeys = append(infoKeys, "version") - verInfo := version.GetVersion() - info["version"] = verInfo.FullVersionNumber(false) - if verInfo.Revision != "" { - 
info["version sha"] = strings.Trim(verInfo.Revision, "'") - infoKeys = append(infoKeys, "version sha") - } - infoKeys = append(infoKeys, "cgo") - info["cgo"] = "disabled" - if version.CgoEnabled { - info["cgo"] = "enabled" - } - - // Tests might not want to start a vault server and just want to verify - // the configuration. - if c.flagTestVerifyOnly { - if os.Getenv("VAULT_TEST_VERIFY_ONLY_DUMP_CONFIG") != "" { - c.UI.Output(fmt.Sprintf( - "\nConfiguration:\n%s\n", - pretty.Sprint(*c.config))) - } - return 0 - } - - // Ignore any setting of Agent/Proxy's address. This client is used by the Proxy - // to reach out to Vault. This should never loop back to the proxy. - c.flagAgentProxyAddress = "" - client, err := c.Client() - if err != nil { - c.UI.Error(fmt.Sprintf( - "Error fetching client: %v", - err)) - return 1 - } - - serverHealth, err := client.Sys().Health() - // We don't have any special behaviour if the error != nil, as this - // is not worth stopping the Proxy process over. - if err == nil { - // Note that we don't exit if the versions don't match, as this is a valid - // configuration, but we should still let the user know. - serverVersion := serverHealth.Version - proxyVersion := version.GetVersion().VersionNumber() - if serverVersion != proxyVersion { - c.UI.Info("==> Note: Vault Proxy version does not match Vault server version. 
" + - fmt.Sprintf("Vault Proxy version: %s, Vault server version: %s", proxyVersion, serverVersion)) - } - } - - // telemetry configuration - inmemMetrics, _, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ - Config: config.Telemetry, - Ui: c.UI, - ServiceName: "vault", - DisplayName: "Vault", - UserAgent: useragent.ProxyString(), - ClusterName: config.ClusterName, - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) - return 1 - } - c.metricsHelper = metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) - - var method auth.AuthMethod - var sinks []*sink.SinkConfig - if config.AutoAuth != nil { - if client.Headers().Get(consts.NamespaceHeaderName) == "" && config.AutoAuth.Method.Namespace != "" { - client.SetNamespace(config.AutoAuth.Method.Namespace) - } - - sinkClient, err := client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for file sink: %v", err)) - return 1 - } - - if config.DisableIdleConnsAutoAuth { - sinkClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAutoAuth { - sinkClient.SetDisableKeepAlives(true) - } - - for _, sc := range config.AutoAuth.Sinks { - switch sc.Type { - case "file": - config := &sink.SinkConfig{ - Logger: c.logger.Named("sink.file"), - Config: sc.Config, - Client: sinkClient, - WrapTTL: sc.WrapTTL, - DHType: sc.DHType, - DeriveKey: sc.DeriveKey, - DHPath: sc.DHPath, - AAD: sc.AAD, - } - s, err := file.NewFileSink(config) - if err != nil { - c.UI.Error(fmt.Errorf("error creating file sink: %w", err).Error()) - return 1 - } - config.Sink = s - sinks = append(sinks, config) - default: - c.UI.Error(fmt.Sprintf("Unknown sink type %q", sc.Type)) - return 1 - } - } - - authConfig := &auth.AuthConfig{ - Logger: c.logger.Named(fmt.Sprintf("auth.%s", config.AutoAuth.Method.Type)), - MountPath: config.AutoAuth.Method.MountPath, - Config: config.AutoAuth.Method.Config, - } - method, err = 
agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) - return 1 - } - } - - // We do this after auto-auth has been configured, because we don't want to - // confuse the issue of retries for auth failures which have their own - // config and are handled a bit differently. - if os.Getenv(api.EnvVaultMaxRetries) == "" { - client.SetMaxRetries(ctconfig.DefaultRetryAttempts) - if config.Vault != nil { - if config.Vault.Retry != nil { - client.SetMaxRetries(config.Vault.Retry.NumRetries) - } - } - } - - enforceConsistency := cache.EnforceConsistencyNever - whenInconsistent := cache.WhenInconsistentFail - if config.APIProxy != nil { - switch config.APIProxy.EnforceConsistency { - case "always": - enforceConsistency = cache.EnforceConsistencyAlways - case "never", "": - default: - c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for enforce_consistency: %q", config.APIProxy.EnforceConsistency)) - return 1 - } - - switch config.APIProxy.WhenInconsistent { - case "retry": - whenInconsistent = cache.WhenInconsistentRetry - case "forward": - whenInconsistent = cache.WhenInconsistentForward - case "fail", "": - default: - c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for when_inconsistent: %q", config.APIProxy.WhenInconsistent)) - return 1 - } - } - - // Warn if cache _and_ cert auto-auth is enabled but certificates were not - // provided in the auto_auth.method["cert"].config stanza. - if config.Cache != nil && (config.AutoAuth != nil && config.AutoAuth.Method != nil && config.AutoAuth.Method.Type == "cert") { - _, okCertFile := config.AutoAuth.Method.Config["client_cert"] - _, okCertKey := config.AutoAuth.Method.Config["client_key"] - - // If neither of these exists in the cert stanza, proxy will use the - // certs from the vault stanza. - if !okCertFile && !okCertKey { - c.UI.Warn(wrapAtLength("WARNING! 
Cache is enabled and using the same certificates " + - "from the 'cert' auto-auth method specified in the 'vault' stanza. Consider " + - "specifying certificate information in the 'cert' auto-auth's config stanza.")) - } - - } - - // Output the header that the proxy has started - if !c.logFlags.flagCombineLogs { - c.UI.Output("==> Vault Proxy started! Log data will stream in below:\n") - } - - var leaseCache *cache.LeaseCache - var previousToken string - - proxyClient, err := client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for proxying: %v", err)) - return 1 - } - - if config.DisableIdleConnsAPIProxy { - proxyClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAPIProxy { - proxyClient.SetDisableKeepAlives(true) - } - - apiProxyLogger := c.logger.Named("apiproxy") - - // The API proxy to be used, if listeners are configured - apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ - Client: proxyClient, - Logger: apiProxyLogger, - EnforceConsistency: enforceConsistency, - WhenInconsistentAction: whenInconsistent, - UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, - UserAgentString: useragent.ProxyAPIProxyString(), - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) - return 1 - } - - // ctx and cancelFunc are passed to the AuthHandler, SinkServer, - // and other subsystems, so that they can listen for ctx.Done() to - // fire and shut down accordingly. - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - // Parse proxy cache configurations - if config.Cache != nil { - cacheLogger := c.logger.Named("cache") - - // Create the lease cache proxier and set its underlying proxier to - // the API proxier. 
- leaseCache, err = cache.NewLeaseCache(&cache.LeaseCacheConfig{ - Client: proxyClient, - BaseContext: ctx, - Proxier: apiProxy, - Logger: cacheLogger.Named("leasecache"), - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err)) - return 1 - } - - // Configure persistent storage and add to LeaseCache - if config.Cache.Persist != nil { - deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) - return 1 - } - previousToken = oldToken - if deferFunc != nil { - defer deferFunc() - } - } - } - - var listeners []net.Listener - - // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. - c.tlsReloadFuncsLock.Lock() - - for i, lnConfig := range config.Listeners { - var ln net.Listener - var tlsCfg *tls.Config - - if lnConfig.Type == listenerutil.BufConnType { - inProcListener := bufconn.Listen(1024 * 1024) - if config.Cache != nil { - config.Cache.InProcDialer = listenerutil.NewBufConnWrapper(inProcListener) - } - ln = inProcListener - } else { - lnBundle, err := cache.StartListener(lnConfig) - if err != nil { - c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) - return 1 - } - - tlsCfg = lnBundle.TLSConfig - ln = lnBundle.Listener - - // Track the reload func, so we can reload later if needed. 
- c.tlsReloadFuncs = append(c.tlsReloadFuncs, lnBundle.TLSReloadFunc) - } - - listeners = append(listeners, ln) - - proxyVaultToken := true - var inmemSink sink.Sink - if config.APIProxy != nil { - if config.APIProxy.UseAutoAuthToken { - apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") - inmemSink, err = inmem.New(&sink.SinkConfig{ - Logger: apiProxyLogger, - }, leaseCache) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) - return 1 - } - sinks = append(sinks, &sink.SinkConfig{ - Logger: apiProxyLogger, - Sink: inmemSink, - }) - } - proxyVaultToken = !config.APIProxy.ForceAutoAuthToken - } - - var muxHandler http.Handler - if leaseCache != nil { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) - } else { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) - } - - // Parse 'require_request_header' listener config option, and wrap - // the request handler if necessary - if lnConfig.RequireRequestHeader && ("metrics_only" != lnConfig.Role) { - muxHandler = verifyRequestHeader(muxHandler) - } - - // Create a muxer and add paths relevant for the lease cache layer - mux := http.NewServeMux() - quitEnabled := lnConfig.ProxyAPI != nil && lnConfig.ProxyAPI.EnableQuit - - mux.Handle(consts.ProxyPathMetrics, c.handleMetrics()) - if "metrics_only" != lnConfig.Role { - mux.Handle(consts.ProxyPathCacheClear, leaseCache.HandleCacheClear(ctx)) - mux.Handle(consts.ProxyPathQuit, c.handleQuit(quitEnabled)) - mux.Handle("/", muxHandler) - } - - scheme := "https://" - if tlsCfg == nil { - scheme = "http://" - } - if ln.Addr().Network() == "unix" { - scheme = "unix://" - } - - infoKey := fmt.Sprintf("api address %d", i+1) - info[infoKey] = scheme + ln.Addr().String() - infoKeys = append(infoKeys, infoKey) - - server := &http.Server{ - Addr: ln.Addr().String(), - TLSConfig: tlsCfg, - Handler: mux, - ReadHeaderTimeout: 10 * 
time.Second, - ReadTimeout: 30 * time.Second, - IdleTimeout: 5 * time.Minute, - ErrorLog: apiProxyLogger.StandardLogger(nil), - } - - go server.Serve(ln) - } - - c.tlsReloadFuncsLock.Unlock() - - // Ensure that listeners are closed at all the exits - listenerCloseFunc := func() { - for _, ln := range listeners { - ln.Close() - } - } - defer c.cleanupGuard.Do(listenerCloseFunc) - - // Inform any tests that the server is ready - if c.startedCh != nil { - close(c.startedCh) - } - - var g run.Group - - g.Add(func() error { - for { - select { - case <-c.SighupCh: - c.UI.Output("==> Vault Proxy config reload triggered") - err := c.reloadConfig(c.flagConfigs) - if err != nil { - c.outputErrors(err) - } - // Send the 'reloaded' message on the relevant channel - select { - case c.reloadedCh <- struct{}{}: - default: - } - case <-ctx.Done(): - return nil - } - } - }, func(error) { - cancelFunc() - }) - - // This run group watches for signal termination - g.Add(func() error { - for { - select { - case <-c.ShutdownCh: - c.UI.Output("==> Vault Proxy shutdown triggered") - // Notify systemd that the server is shutting down - // Let the lease cache know this is a shutdown; no need to evict everything - if leaseCache != nil { - leaseCache.SetShuttingDown(true) - } - return nil - case <-ctx.Done(): - return nil - case <-winsvc.ShutdownChannel(): - return nil - } - } - }, func(error) {}) - - // Start auto-auth and sink servers - if method != nil { - // Auth Handler is going to set its own retry values, so we want to - // work on a copy of the client to not affect other subsystems. 
- ahClient, err := c.client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) - return 1 - } - - if config.DisableIdleConnsAutoAuth { - ahClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAutoAuth { - ahClient.SetDisableKeepAlives(true) - } - - ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{ - Logger: c.logger.Named("auth.handler"), - Client: ahClient, - WrapTTL: config.AutoAuth.Method.WrapTTL, - MinBackoff: config.AutoAuth.Method.MinBackoff, - MaxBackoff: config.AutoAuth.Method.MaxBackoff, - EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, - Token: previousToken, - ExitOnError: config.AutoAuth.Method.ExitOnError, - UserAgent: useragent.ProxyAutoAuthString(), - MetricsSignifier: "proxy", - }) - - ss := sink.NewSinkServer(&sink.SinkServerConfig{ - Logger: c.logger.Named("sink.server"), - Client: ahClient, - ExitAfterAuth: config.ExitAfterAuth, - }) - - g.Add(func() error { - return ah.Run(ctx, method) - }, func(error) { - // Let the lease cache know this is a shutdown; no need to evict - // everything - if leaseCache != nil { - leaseCache.SetShuttingDown(true) - } - cancelFunc() - }) - - g.Add(func() error { - err := ss.Run(ctx, ah.OutputCh, sinks) - c.logger.Info("sinks finished, exiting") - - // Start goroutine to drain from ah.OutputCh from this point onward - // to prevent ah.Run from being blocked. 
- go func() { - for { - select { - case <-ctx.Done(): - return - case <-ah.OutputCh: - } - } - }() - - return err - }, func(error) { - // Let the lease cache know this is a shutdown; no need to evict - // everything - if leaseCache != nil { - leaseCache.SetShuttingDown(true) - } - cancelFunc() - }) - } - - // Server configuration output - padding := 24 - sort.Strings(infoKeys) - caser := cases.Title(language.English) - c.UI.Output("==> Vault Proxy configuration:\n") - for _, k := range infoKeys { - c.UI.Output(fmt.Sprintf( - "%s%s: %s", - strings.Repeat(" ", padding-len(k)), - caser.String(k), - info[k])) - } - c.UI.Output("") - - // Release the log gate. - c.logGate.Flush() - - // Write out the PID to the file now that server has successfully started - if err := c.storePidFile(config.PidFile); err != nil { - c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) - return 1 - } - - // Notify systemd that the server is ready (if applicable) - c.notifySystemd(systemd.SdNotifyReady) - - defer func() { - if err := c.removePidFile(config.PidFile); err != nil { - c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) - } - }() - - var exitCode int - if err := g.Run(); err != nil { - c.logger.Error("runtime error encountered", "error", err) - c.UI.Error("Error encountered during run, refer to logs for more details.") - exitCode = 1 - } - c.notifySystemd(systemd.SdNotifyStopping) - return exitCode -} - -// applyConfigOverrides ensures that the config object accurately reflects the desired -// settings as configured by the user. It applies the relevant config setting based -// on the precedence (env var overrides file config, cli overrides env var). -// It mutates the config object supplied. 
-func (c *ProxyCommand) applyConfigOverrides(f *FlagSets, config *proxyConfig.Config) { - if config.Vault == nil { - config.Vault = &proxyConfig.Vault{} - } - - f.applyLogConfigOverrides(config.SharedConfig) - - f.Visit(func(fl *flag.Flag) { - if fl.Name == flagNameProxyExitAfterAuth { - config.ExitAfterAuth = c.flagExitAfterAuth - } - }) - - c.setStringFlag(f, config.Vault.Address, &StringVar{ - Name: flagNameAddress, - Target: &c.flagAddress, - Default: "https://127.0.0.1:8200", - EnvVar: api.EnvVaultAddress, - }) - config.Vault.Address = c.flagAddress - c.setStringFlag(f, config.Vault.CACert, &StringVar{ - Name: flagNameCACert, - Target: &c.flagCACert, - Default: "", - EnvVar: api.EnvVaultCACert, - }) - config.Vault.CACert = c.flagCACert - c.setStringFlag(f, config.Vault.CAPath, &StringVar{ - Name: flagNameCAPath, - Target: &c.flagCAPath, - Default: "", - EnvVar: api.EnvVaultCAPath, - }) - config.Vault.CAPath = c.flagCAPath - c.setStringFlag(f, config.Vault.ClientCert, &StringVar{ - Name: flagNameClientCert, - Target: &c.flagClientCert, - Default: "", - EnvVar: api.EnvVaultClientCert, - }) - config.Vault.ClientCert = c.flagClientCert - c.setStringFlag(f, config.Vault.ClientKey, &StringVar{ - Name: flagNameClientKey, - Target: &c.flagClientKey, - Default: "", - EnvVar: api.EnvVaultClientKey, - }) - config.Vault.ClientKey = c.flagClientKey - c.setBoolFlag(f, config.Vault.TLSSkipVerify, &BoolVar{ - Name: flagNameTLSSkipVerify, - Target: &c.flagTLSSkipVerify, - Default: false, - EnvVar: api.EnvVaultSkipVerify, - }) - config.Vault.TLSSkipVerify = c.flagTLSSkipVerify - c.setStringFlag(f, config.Vault.TLSServerName, &StringVar{ - Name: flagTLSServerName, - Target: &c.flagTLSServerName, - Default: "", - EnvVar: api.EnvVaultTLSServerName, - }) - config.Vault.TLSServerName = c.flagTLSServerName -} - -func (c *ProxyCommand) notifySystemd(status string) { - sent, err := systemd.SdNotify(false, status) - if err != nil { - c.logger.Error("error notifying systemd", "error", 
err) - } else { - if sent { - c.logger.Debug("sent systemd notification", "notification", status) - } else { - c.logger.Debug("would have sent systemd notification (systemd not present)", "notification", status) - } - } -} - -func (c *ProxyCommand) setStringFlag(f *FlagSets, configVal string, fVar *StringVar) { - var isFlagSet bool - f.Visit(func(f *flag.Flag) { - if f.Name == fVar.Name { - isFlagSet = true - } - }) - - flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) - switch { - case isFlagSet: - // Don't do anything as the flag is already set from the command line - case flagEnvSet: - // Use value from env var - *fVar.Target = flagEnvValue - case configVal != "": - // Use value from config - *fVar.Target = configVal - default: - // Use the default value - *fVar.Target = fVar.Default - } -} - -func (c *ProxyCommand) setBoolFlag(f *FlagSets, configVal bool, fVar *BoolVar) { - var isFlagSet bool - f.Visit(func(f *flag.Flag) { - if f.Name == fVar.Name { - isFlagSet = true - } - }) - - flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) - switch { - case isFlagSet: - // Don't do anything as the flag is already set from the command line - case flagEnvSet: - // Use value from env var - *fVar.Target = flagEnvValue != "" - case configVal: - // Use value from config - *fVar.Target = configVal - default: - // Use the default value - *fVar.Target = fVar.Default - } -} - -// storePidFile is used to write out our PID to a file if necessary -func (c *ProxyCommand) storePidFile(pidPath string) error { - // Quit fast if no pidfile - if pidPath == "" { - return nil - } - - // Open the PID file - pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) - if err != nil { - return fmt.Errorf("could not open pid file: %w", err) - } - defer pidFile.Close() - - // Write out the PID - pid := os.Getpid() - _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) - if err != nil { - return fmt.Errorf("could not write to pid file: %w", err) - } - return nil -} - 
-// removePidFile is used to cleanup the PID file if necessary -func (c *ProxyCommand) removePidFile(pidPath string) error { - if pidPath == "" { - return nil - } - return os.Remove(pidPath) -} - -func (c *ProxyCommand) handleMetrics() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - logical.RespondError(w, http.StatusMethodNotAllowed, nil) - return - } - - if err := r.ParseForm(); err != nil { - logical.RespondError(w, http.StatusBadRequest, err) - return - } - - format := r.Form.Get("format") - if format == "" { - format = metricsutil.FormatFromRequest(&logical.Request{ - Headers: r.Header, - }) - } - - resp := c.metricsHelper.ResponseForFormat(format) - - status := resp.Data[logical.HTTPStatusCode].(int) - w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) - switch v := resp.Data[logical.HTTPRawBody].(type) { - case string: - w.WriteHeader(status) - w.Write([]byte(v)) - case []byte: - w.WriteHeader(status) - w.Write(v) - default: - logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("wrong response returned")) - } - }) -} - -func (c *ProxyCommand) handleQuit(enabled bool) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !enabled { - w.WriteHeader(http.StatusNotFound) - return - } - - switch r.Method { - case http.MethodPost: - default: - w.WriteHeader(http.StatusMethodNotAllowed) - return - } - - c.logger.Debug("received quit request") - close(c.ShutdownCh) - }) -} - -// newLogger creates a logger based on parsed config field on the Proxy Command struct. 
-func (c *ProxyCommand) newLogger() (log.InterceptLogger, error) { - if c.config == nil { - return nil, fmt.Errorf("cannot create logger, no config") - } - - var errors error - - // Parse all the log related config - logLevel, err := logging.ParseLogLevel(c.config.LogLevel) - if err != nil { - errors = multierror.Append(errors, err) - } - - logFormat, err := logging.ParseLogFormat(c.config.LogFormat) - if err != nil { - errors = multierror.Append(errors, err) - } - - logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration) - if err != nil { - errors = multierror.Append(errors, err) - } - - if errors != nil { - return nil, errors - } - - logCfg := &logging.LogConfig{ - Name: "proxy", - LogLevel: logLevel, - LogFormat: logFormat, - LogFilePath: c.config.LogFile, - LogRotateDuration: logRotateDuration, - LogRotateBytes: c.config.LogRotateBytes, - LogRotateMaxFiles: c.config.LogRotateMaxFiles, - } - - l, err := logging.Setup(logCfg, c.logWriter) - if err != nil { - return nil, err - } - - return l, nil -} - -// loadConfig attempts to generate a Proxy config from the file(s) specified. -func (c *ProxyCommand) loadConfig(paths []string) (*proxyConfig.Config, error) { - var errors error - cfg := proxyConfig.NewConfig() - - for _, configPath := range paths { - configFromPath, err := proxyConfig.LoadConfig(configPath) - if err != nil { - errors = multierror.Append(errors, fmt.Errorf("error loading configuration from %s: %w", configPath, err)) - } else { - cfg = cfg.Merge(configFromPath) - } - } - - if errors != nil { - return nil, errors - } - - if err := cfg.ValidateConfig(); err != nil { - return nil, fmt.Errorf("error validating configuration: %w", err) - } - - return cfg, nil -} - -// reloadConfig will attempt to reload the config from file(s) and adjust certain -// config values without requiring a restart of the Vault Proxy. -// If config is retrieved without error it is stored in the config field of the ProxyCommand. 
-// This operation is not atomic and could result in updated config but partially applied config settings. -// The error returned from this func may be a multierror. -// This function will most likely be called due to Vault Proxy receiving a SIGHUP signal. -// Currently only reloading the following are supported: -// * log level -// * TLS certs for listeners -func (c *ProxyCommand) reloadConfig(paths []string) error { - // Notify systemd that the server is reloading - c.notifySystemd(systemd.SdNotifyReloading) - defer c.notifySystemd(systemd.SdNotifyReady) - - var errors error - - // Reload the config - cfg, err := c.loadConfig(paths) - if err != nil { - // Returning single error as we won't continue with bad config and won't 'commit' it. - return err - } - c.config = cfg - - // Update the log level - err = c.reloadLogLevel() - if err != nil { - errors = multierror.Append(errors, err) - } - - // Update certs - err = c.reloadCerts() - if err != nil { - errors = multierror.Append(errors, err) - } - - return errors -} - -// reloadLogLevel will attempt to update the log level for the logger attached -// to the ProxyCommand struct using the value currently set in config. -func (c *ProxyCommand) reloadLogLevel() error { - logLevel, err := logging.ParseLogLevel(c.config.LogLevel) - if err != nil { - return err - } - - c.logger.SetLevel(logLevel) - - return nil -} - -// reloadCerts will attempt to reload certificates using a reload func which -// was provided when the listeners were configured, only funcs that were appended -// to the ProxyCommand slice will be invoked. -// This function returns a multierror type so that every func can report an error -// if it encounters one. -func (c *ProxyCommand) reloadCerts() error { - var errors error - - c.tlsReloadFuncsLock.RLock() - defer c.tlsReloadFuncsLock.RUnlock() - - for _, reloadFunc := range c.tlsReloadFuncs { - // Non-TLS listeners will have a nil reload func. 
- if reloadFunc != nil { - err := reloadFunc() - if err != nil { - errors = multierror.Append(errors, err) - } - } - } - - return errors -} - -// outputErrors will take an error or multierror and handle outputting each to the UI -func (c *ProxyCommand) outputErrors(err error) { - if err != nil { - if me, ok := err.(*multierror.Error); ok { - for _, err := range me.Errors { - c.UI.Error(err.Error()) - } - } else { - c.UI.Error(err.Error()) - } - } -} diff --git a/command/proxy/config/config.go b/command/proxy/config/config.go deleted file mode 100644 index f760f1eb04731..0000000000000 --- a/command/proxy/config/config.go +++ /dev/null @@ -1,832 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package config - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "path/filepath" - "strings" - "time" - - ctconfig "github.com/hashicorp/consul-template/config" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/vault/command/agentproxyshared" - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/internalshared/configutil" -) - -// Config is the configuration for Vault Proxy. 
-type Config struct { - *configutil.SharedConfig `hcl:"-"` - - AutoAuth *AutoAuth `hcl:"auto_auth"` - ExitAfterAuth bool `hcl:"exit_after_auth"` - Cache *Cache `hcl:"cache"` - APIProxy *APIProxy `hcl:"api_proxy""` - Vault *Vault `hcl:"vault"` - DisableIdleConns []string `hcl:"disable_idle_connections"` - DisableIdleConnsAPIProxy bool `hcl:"-"` - DisableIdleConnsAutoAuth bool `hcl:"-"` - DisableKeepAlives []string `hcl:"disable_keep_alives"` - DisableKeepAlivesAPIProxy bool `hcl:"-"` - DisableKeepAlivesAutoAuth bool `hcl:"-"` -} - -const ( - DisableIdleConnsEnv = "VAULT_PROXY_DISABLE_IDLE_CONNECTIONS" - DisableKeepAlivesEnv = "VAULT_PROXY_DISABLE_KEEP_ALIVES" -) - -func (c *Config) Prune() { - for _, l := range c.Listeners { - l.RawConfig = nil - l.Profiling.UnusedKeys = nil - l.Telemetry.UnusedKeys = nil - l.CustomResponseHeaders = nil - } - c.FoundKeys = nil - c.UnusedKeys = nil - c.SharedConfig.FoundKeys = nil - c.SharedConfig.UnusedKeys = nil - if c.Telemetry != nil { - c.Telemetry.FoundKeys = nil - c.Telemetry.UnusedKeys = nil - } -} - -type Retry struct { - NumRetries int `hcl:"num_retries"` -} - -// Vault contains configuration for connecting to Vault servers -type Vault struct { - Address string `hcl:"address"` - CACert string `hcl:"ca_cert"` - CAPath string `hcl:"ca_path"` - TLSSkipVerify bool `hcl:"-"` - TLSSkipVerifyRaw interface{} `hcl:"tls_skip_verify"` - ClientCert string `hcl:"client_cert"` - ClientKey string `hcl:"client_key"` - TLSServerName string `hcl:"tls_server_name"` - Retry *Retry `hcl:"retry"` -} - -// transportDialer is an interface that allows passing a custom dialer function -// to an HTTP client's transport config -type transportDialer interface { - // Dial is intended to match https://pkg.go.dev/net#Dialer.Dial - Dial(network, address string) (net.Conn, error) - - // DialContext is intended to match https://pkg.go.dev/net#Dialer.DialContext - DialContext(ctx context.Context, network, address string) (net.Conn, error) -} - -// APIProxy 
contains any configuration needed for proxy mode -type APIProxy struct { - UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"` - UseAutoAuthToken bool `hcl:"-"` - ForceAutoAuthToken bool `hcl:"-"` - EnforceConsistency string `hcl:"enforce_consistency"` - WhenInconsistent string `hcl:"when_inconsistent"` -} - -// Cache contains any configuration needed for Cache mode -type Cache struct { - Persist *agentproxyshared.PersistConfig `hcl:"persist"` - InProcDialer transportDialer `hcl:"-"` -} - -// AutoAuth is the configured authentication method and sinks -type AutoAuth struct { - Method *Method `hcl:"-"` - Sinks []*Sink `hcl:"sinks"` - - // NOTE: This is unsupported outside of testing and may disappear at any - // time. - EnableReauthOnNewCredentials bool `hcl:"enable_reauth_on_new_credentials"` -} - -// Method represents the configuration for the authentication backend -type Method struct { - Type string - MountPath string `hcl:"mount_path"` - WrapTTLRaw interface{} `hcl:"wrap_ttl"` - WrapTTL time.Duration `hcl:"-"` - MinBackoffRaw interface{} `hcl:"min_backoff"` - MinBackoff time.Duration `hcl:"-"` - MaxBackoffRaw interface{} `hcl:"max_backoff"` - MaxBackoff time.Duration `hcl:"-"` - Namespace string `hcl:"namespace"` - ExitOnError bool `hcl:"exit_on_err"` - Config map[string]interface{} -} - -// Sink defines a location to write the authenticated token -type Sink struct { - Type string - WrapTTLRaw interface{} `hcl:"wrap_ttl"` - WrapTTL time.Duration `hcl:"-"` - DHType string `hcl:"dh_type"` - DeriveKey bool `hcl:"derive_key"` - DHPath string `hcl:"dh_path"` - AAD string `hcl:"aad"` - AADEnvVar string `hcl:"aad_env_var"` - Config map[string]interface{} -} - -func NewConfig() *Config { - return &Config{ - SharedConfig: new(configutil.SharedConfig), - } -} - -// Merge merges two Proxy configurations. 
-func (c *Config) Merge(c2 *Config) *Config { - if c2 == nil { - return c - } - - result := NewConfig() - - result.SharedConfig = c.SharedConfig - if c2.SharedConfig != nil { - result.SharedConfig = c.SharedConfig.Merge(c2.SharedConfig) - } - - result.AutoAuth = c.AutoAuth - if c2.AutoAuth != nil { - result.AutoAuth = c2.AutoAuth - } - - result.Cache = c.Cache - if c2.Cache != nil { - result.Cache = c2.Cache - } - - result.APIProxy = c.APIProxy - if c2.APIProxy != nil { - result.APIProxy = c2.APIProxy - } - - result.DisableMlock = c.DisableMlock - if c2.DisableMlock { - result.DisableMlock = c2.DisableMlock - } - - // For these, ignore the non-specific one and overwrite them all - result.DisableIdleConnsAutoAuth = c.DisableIdleConnsAutoAuth - if c2.DisableIdleConnsAutoAuth { - result.DisableIdleConnsAutoAuth = c2.DisableIdleConnsAutoAuth - } - - result.DisableIdleConnsAPIProxy = c.DisableIdleConnsAPIProxy - if c2.DisableIdleConnsAPIProxy { - result.DisableIdleConnsAPIProxy = c2.DisableIdleConnsAPIProxy - } - - result.DisableKeepAlivesAutoAuth = c.DisableKeepAlivesAutoAuth - if c2.DisableKeepAlivesAutoAuth { - result.DisableKeepAlivesAutoAuth = c2.DisableKeepAlivesAutoAuth - } - - result.DisableKeepAlivesAPIProxy = c.DisableKeepAlivesAPIProxy - if c2.DisableKeepAlivesAPIProxy { - result.DisableKeepAlivesAPIProxy = c2.DisableKeepAlivesAPIProxy - } - - result.ExitAfterAuth = c.ExitAfterAuth - if c2.ExitAfterAuth { - result.ExitAfterAuth = c2.ExitAfterAuth - } - - result.Vault = c.Vault - if c2.Vault != nil { - result.Vault = c2.Vault - } - - result.PidFile = c.PidFile - if c2.PidFile != "" { - result.PidFile = c2.PidFile - } - - return result -} - -// ValidateConfig validates a Vault configuration after it has been fully merged together, to -// ensure that required combinations of configs are there -func (c *Config) ValidateConfig() error { - if c.Cache != nil { - if len(c.Listeners) < 1 { - return fmt.Errorf("enabling the cache requires at least 1 listener to be 
defined") - } - } - - if c.APIProxy != nil { - if len(c.Listeners) < 1 { - return fmt.Errorf("configuring the api_proxy requires at least 1 listener to be defined") - } - - if c.APIProxy.UseAutoAuthToken { - if c.AutoAuth == nil { - return fmt.Errorf("api_proxy.use_auto_auth_token is true but auto_auth not configured") - } - if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { - return fmt.Errorf("api_proxy.use_auto_auth_token is true and auto_auth uses wrapping") - } - } - } - - if c.AutoAuth != nil { - if len(c.AutoAuth.Sinks) == 0 && - (c.APIProxy == nil || !c.APIProxy.UseAutoAuthToken) { - return fmt.Errorf("auto_auth requires at least one sink or api_proxy.use_auto_auth_token=true") - } - } - - if c.AutoAuth == nil && c.Cache == nil && len(c.Listeners) == 0 { - return fmt.Errorf("no auto_auth, cache, or listener block found in config") - } - - return nil -} - -// LoadConfig loads the configuration at the given path, regardless if -// it's a file or directory. -func LoadConfig(path string) (*Config, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - - if fi.IsDir() { - return LoadConfigDir(path) - } - return LoadConfigFile(path) -} - -// LoadConfigDir loads the configuration at the given path if it's a directory -func LoadConfigDir(dir string) (*Config, error) { - f, err := os.Open(dir) - if err != nil { - return nil, err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return nil, err - } - if !fi.IsDir() { - return nil, fmt.Errorf("configuration path must be a directory: %q", dir) - } - - var files []string - err = nil - for err != io.EOF { - var fis []os.FileInfo - fis, err = f.Readdir(128) - if err != nil && err != io.EOF { - return nil, err - } - - for _, fi := range fis { - // Ignore directories - if fi.IsDir() { - continue - } - - // Only care about files that are valid to load. 
- name := fi.Name() - skip := true - if strings.HasSuffix(name, ".hcl") { - skip = false - } else if strings.HasSuffix(name, ".json") { - skip = false - } - if skip || isTemporaryFile(name) { - continue - } - - path := filepath.Join(dir, name) - files = append(files, path) - } - } - - result := NewConfig() - for _, f := range files { - config, err := LoadConfigFile(f) - if err != nil { - return nil, fmt.Errorf("error loading %q: %w", f, err) - } - - if result == nil { - result = config - } else { - result = result.Merge(config) - } - } - - return result, nil -} - -// isTemporaryFile returns true or false depending on whether the -// provided file name is a temporary file for the following editors: -// emacs or vim. -func isTemporaryFile(name string) bool { - return strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, ".#") || // emacs - (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs -} - -// LoadConfigFile loads the configuration at the given path if it's a file -func LoadConfigFile(path string) (*Config, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - - if fi.IsDir() { - return nil, fmt.Errorf("location is a directory, not a file") - } - - // Read the file - d, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - // Parse! 
- obj, err := hcl.Parse(string(d)) - if err != nil { - return nil, err - } - - // Attribute - ast.Walk(obj, func(n ast.Node) (ast.Node, bool) { - if k, ok := n.(*ast.ObjectKey); ok { - k.Token.Pos.Filename = path - } - return n, true - }) - - // Start building the result - result := NewConfig() - if err := hcl.DecodeObject(result, obj); err != nil { - return nil, err - } - - sharedConfig, err := configutil.ParseConfig(string(d)) - if err != nil { - return nil, err - } - - // Pruning custom headers for Vault for now - for _, ln := range sharedConfig.Listeners { - ln.CustomResponseHeaders = nil - } - - result.SharedConfig = sharedConfig - - list, ok := obj.Node.(*ast.ObjectList) - if !ok { - return nil, fmt.Errorf("error parsing: file doesn't contain a root object") - } - - if err := parseAutoAuth(result, list); err != nil { - return nil, fmt.Errorf("error parsing 'auto_auth': %w", err) - } - - if err := parseCache(result, list); err != nil { - return nil, fmt.Errorf("error parsing 'cache':%w", err) - } - - if err := parseAPIProxy(result, list); err != nil { - return nil, fmt.Errorf("error parsing 'api_proxy':%w", err) - } - - err = parseVault(result, list) - if err != nil { - return nil, fmt.Errorf("error parsing 'vault':%w", err) - } - - if result.Vault != nil { - // Set defaults - if result.Vault.Retry == nil { - result.Vault.Retry = &Retry{} - } - switch result.Vault.Retry.NumRetries { - case 0: - result.Vault.Retry.NumRetries = ctconfig.DefaultRetryAttempts - case -1: - result.Vault.Retry.NumRetries = 0 - } - } - - if disableIdleConnsEnv := os.Getenv(DisableIdleConnsEnv); disableIdleConnsEnv != "" { - result.DisableIdleConns, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableIdleConnsEnv)) - if err != nil { - return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableIdleConnsEnv, err) - } - } - - for _, subsystem := range result.DisableIdleConns { - switch subsystem { - case "auto-auth": - result.DisableIdleConnsAutoAuth = true - case 
"caching", "proxying": - result.DisableIdleConnsAPIProxy = true - case "": - continue - default: - return nil, fmt.Errorf("unknown disable_idle_connections value: %s", subsystem) - } - } - - if disableKeepAlivesEnv := os.Getenv(DisableKeepAlivesEnv); disableKeepAlivesEnv != "" { - result.DisableKeepAlives, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableKeepAlivesEnv)) - if err != nil { - return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableKeepAlivesEnv, err) - } - } - - for _, subsystem := range result.DisableKeepAlives { - switch subsystem { - case "auto-auth": - result.DisableKeepAlivesAutoAuth = true - case "caching", "proxying": - result.DisableKeepAlivesAPIProxy = true - case "": - continue - default: - return nil, fmt.Errorf("unknown disable_keep_alives value: %s", subsystem) - } - } - - return result, nil -} - -func parseVault(result *Config, list *ast.ObjectList) error { - name := "vault" - - vaultList := list.Filter(name) - if len(vaultList.Items) == 0 { - return nil - } - - if len(vaultList.Items) > 1 { - return fmt.Errorf("one and only one %q block is required", name) - } - - item := vaultList.Items[0] - - var v Vault - err := hcl.DecodeObject(&v, item.Val) - if err != nil { - return err - } - - if v.TLSSkipVerifyRaw != nil { - v.TLSSkipVerify, err = parseutil.ParseBool(v.TLSSkipVerifyRaw) - if err != nil { - return err - } - } - - result.Vault = &v - - subs, ok := item.Val.(*ast.ObjectType) - if !ok { - return fmt.Errorf("could not parse %q as an object", name) - } - - if err := parseRetry(result, subs.List); err != nil { - return fmt.Errorf("error parsing 'retry': %w", err) - } - - return nil -} - -func parseRetry(result *Config, list *ast.ObjectList) error { - name := "retry" - - retryList := list.Filter(name) - if len(retryList.Items) == 0 { - return nil - } - - if len(retryList.Items) > 1 { - return fmt.Errorf("one and only one %q block is required", name) - } - - item := retryList.Items[0] - - var r Retry - err 
:= hcl.DecodeObject(&r, item.Val) - if err != nil { - return err - } - - result.Vault.Retry = &r - - return nil -} - -func parseAPIProxy(result *Config, list *ast.ObjectList) error { - name := "api_proxy" - - apiProxyList := list.Filter(name) - if len(apiProxyList.Items) == 0 { - return nil - } - - if len(apiProxyList.Items) > 1 { - return fmt.Errorf("one and only one %q block is required", name) - } - - item := apiProxyList.Items[0] - - var apiProxy APIProxy - err := hcl.DecodeObject(&apiProxy, item.Val) - if err != nil { - return err - } - - if apiProxy.UseAutoAuthTokenRaw != nil { - apiProxy.UseAutoAuthToken, err = parseutil.ParseBool(apiProxy.UseAutoAuthTokenRaw) - if err != nil { - // Could be a value of "force" instead of "true"/"false" - switch apiProxy.UseAutoAuthTokenRaw.(type) { - case string: - v := apiProxy.UseAutoAuthTokenRaw.(string) - - if !strings.EqualFold(v, "force") { - return fmt.Errorf("value of 'use_auto_auth_token' can be either true/false/force, %q is an invalid option", apiProxy.UseAutoAuthTokenRaw) - } - apiProxy.UseAutoAuthToken = true - apiProxy.ForceAutoAuthToken = true - - default: - return err - } - } - } - result.APIProxy = &apiProxy - - return nil -} - -func parseCache(result *Config, list *ast.ObjectList) error { - name := "cache" - - cacheList := list.Filter(name) - if len(cacheList.Items) == 0 { - return nil - } - - if len(cacheList.Items) > 1 { - return fmt.Errorf("one and only one %q block is required", name) - } - - item := cacheList.Items[0] - - var c Cache - err := hcl.DecodeObject(&c, item.Val) - if err != nil { - return err - } - - result.Cache = &c - - subs, ok := item.Val.(*ast.ObjectType) - if !ok { - return fmt.Errorf("could not parse %q as an object", name) - } - subList := subs.List - if err := parsePersist(result, subList); err != nil { - return fmt.Errorf("error parsing persist: %w", err) - } - - return nil -} - -func parsePersist(result *Config, list *ast.ObjectList) error { - name := "persist" - - persistList := 
list.Filter(name) - if len(persistList.Items) == 0 { - return nil - } - - if len(persistList.Items) > 1 { - return fmt.Errorf("only one %q block is required", name) - } - - item := persistList.Items[0] - - var p agentproxyshared.PersistConfig - err := hcl.DecodeObject(&p, item.Val) - if err != nil { - return err - } - - if p.Type == "" { - if len(item.Keys) == 1 { - p.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) - } - if p.Type == "" { - return errors.New("persist type must be specified") - } - } - - result.Cache.Persist = &p - - return nil -} - -func parseAutoAuth(result *Config, list *ast.ObjectList) error { - name := "auto_auth" - - autoAuthList := list.Filter(name) - if len(autoAuthList.Items) == 0 { - return nil - } - if len(autoAuthList.Items) > 1 { - return fmt.Errorf("at most one %q block is allowed", name) - } - - // Get our item - item := autoAuthList.Items[0] - - var a AutoAuth - if err := hcl.DecodeObject(&a, item.Val); err != nil { - return err - } - - result.AutoAuth = &a - - subs, ok := item.Val.(*ast.ObjectType) - if !ok { - return fmt.Errorf("could not parse %q as an object", name) - } - subList := subs.List - - if err := parseMethod(result, subList); err != nil { - return fmt.Errorf("error parsing 'method': %w", err) - } - if a.Method == nil { - return fmt.Errorf("no 'method' block found") - } - - if err := parseSinks(result, subList); err != nil { - return fmt.Errorf("error parsing 'sink' stanzas: %w", err) - } - - if result.AutoAuth.Method.WrapTTL > 0 { - if len(result.AutoAuth.Sinks) != 1 { - return fmt.Errorf("error parsing auto_auth: wrapping enabled on auth method and 0 or many sinks defined") - } - - if result.AutoAuth.Sinks[0].WrapTTL > 0 { - return fmt.Errorf("error parsing auto_auth: wrapping enabled both on auth method and sink") - } - } - - if result.AutoAuth.Method.MaxBackoffRaw != nil { - var err error - if result.AutoAuth.Method.MaxBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MaxBackoffRaw); err 
!= nil { - return err - } - result.AutoAuth.Method.MaxBackoffRaw = nil - } - - if result.AutoAuth.Method.MinBackoffRaw != nil { - var err error - if result.AutoAuth.Method.MinBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MinBackoffRaw); err != nil { - return err - } - result.AutoAuth.Method.MinBackoffRaw = nil - } - - return nil -} - -func parseMethod(result *Config, list *ast.ObjectList) error { - name := "method" - - methodList := list.Filter(name) - if len(methodList.Items) != 1 { - return fmt.Errorf("one and only one %q block is required", name) - } - - // Get our item - item := methodList.Items[0] - - var m Method - if err := hcl.DecodeObject(&m, item.Val); err != nil { - return err - } - - if m.Type == "" { - if len(item.Keys) == 1 { - m.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) - } - if m.Type == "" { - return errors.New("method type must be specified") - } - } - - // Default to Vault's default - if m.MountPath == "" { - m.MountPath = fmt.Sprintf("auth/%s", m.Type) - } - // Standardize on no trailing slash - m.MountPath = strings.TrimSuffix(m.MountPath, "/") - - if m.WrapTTLRaw != nil { - var err error - if m.WrapTTL, err = parseutil.ParseDurationSecond(m.WrapTTLRaw); err != nil { - return err - } - m.WrapTTLRaw = nil - } - - // Canonicalize namespace path if provided - m.Namespace = namespace.Canonicalize(m.Namespace) - - result.AutoAuth.Method = &m - return nil -} - -func parseSinks(result *Config, list *ast.ObjectList) error { - name := "sink" - - sinkList := list.Filter(name) - if len(sinkList.Items) < 1 { - return nil - } - - var ts []*Sink - - for _, item := range sinkList.Items { - var s Sink - if err := hcl.DecodeObject(&s, item.Val); err != nil { - return err - } - - if s.Type == "" { - if len(item.Keys) == 1 { - s.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) - } - if s.Type == "" { - return errors.New("sink type must be specified") - } - } - - if s.WrapTTLRaw != nil { - var err error - if 
s.WrapTTL, err = parseutil.ParseDurationSecond(s.WrapTTLRaw); err != nil { - return multierror.Prefix(err, fmt.Sprintf("sink.%s", s.Type)) - } - s.WrapTTLRaw = nil - } - - switch s.DHType { - case "": - case "curve25519": - default: - return multierror.Prefix(errors.New("invalid value for 'dh_type'"), fmt.Sprintf("sink.%s", s.Type)) - } - - if s.AADEnvVar != "" { - s.AAD = os.Getenv(s.AADEnvVar) - s.AADEnvVar = "" - } - - switch { - case s.DHPath == "" && s.DHType == "": - if s.AAD != "" { - return multierror.Prefix(errors.New("specifying AAD data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) - } - if s.DeriveKey { - return multierror.Prefix(errors.New("specifying 'derive_key' data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) - } - case s.DHPath != "" && s.DHType != "": - default: - return multierror.Prefix(errors.New("'dh_type' and 'dh_path' must be specified together"), fmt.Sprintf("sink.%s", s.Type)) - } - - ts = append(ts, &s) - } - - result.AutoAuth.Sinks = ts - return nil -} diff --git a/command/proxy/config/config_test.go b/command/proxy/config/config_test.go deleted file mode 100644 index 612d7a6865085..0000000000000 --- a/command/proxy/config/config_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package config - -import ( - "testing" - - "github.com/go-test/deep" - "github.com/hashicorp/vault/command/agentproxyshared" - "github.com/hashicorp/vault/internalshared/configutil" -) - -// TestLoadConfigFile_ProxyCache tests loading a config file containing a cache -// as well as a valid proxy config. 
-func TestLoadConfigFile_ProxyCache(t *testing.T) { - config, err := LoadConfigFile("./test-fixtures/config-cache.hcl") - if err != nil { - t.Fatal(err) - } - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - PidFile: "./pidfile", - Listeners: []*configutil.Listener{ - { - Type: "unix", - Address: "/path/to/socket", - TLSDisable: true, - SocketMode: "configmode", - SocketUser: "configuser", - SocketGroup: "configgroup", - }, - { - Type: "tcp", - Address: "127.0.0.1:8300", - TLSDisable: true, - }, - { - Type: "tcp", - Address: "127.0.0.1:3000", - Role: "metrics_only", - TLSDisable: true, - }, - { - Type: "tcp", - Role: "default", - Address: "127.0.0.1:8400", - TLSKeyFile: "/path/to/cakey.pem", - TLSCertFile: "/path/to/cacert.pem", - }, - }, - }, - AutoAuth: &AutoAuth{ - Method: &Method{ - Type: "aws", - MountPath: "auth/aws", - Config: map[string]interface{}{ - "role": "foobar", - }, - }, - Sinks: []*Sink{ - { - Type: "file", - DHType: "curve25519", - DHPath: "/tmp/file-foo-dhpath", - AAD: "foobar", - Config: map[string]interface{}{ - "path": "/tmp/file-foo", - }, - }, - }, - }, - APIProxy: &APIProxy{ - EnforceConsistency: "always", - WhenInconsistent: "retry", - UseAutoAuthTokenRaw: true, - UseAutoAuthToken: true, - ForceAutoAuthToken: false, - }, - Cache: &Cache{ - Persist: &agentproxyshared.PersistConfig{ - Type: "kubernetes", - Path: "/vault/agent-cache/", - KeepAfterImport: true, - ExitOnErr: true, - ServiceAccountTokenFile: "/tmp/serviceaccount/token", - }, - }, - Vault: &Vault{ - Address: "http://127.0.0.1:1111", - CACert: "config_ca_cert", - CAPath: "config_ca_path", - TLSSkipVerifyRaw: interface{}("true"), - TLSSkipVerify: true, - ClientCert: "config_client_cert", - ClientKey: "config_client_key", - Retry: &Retry{ - NumRetries: 12, - }, - }, - } - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } - - config, err = LoadConfigFile("./test-fixtures/config-cache-embedded-type.hcl") - if err != nil { - 
t.Fatal(err) - } - expected.Vault.TLSSkipVerifyRaw = interface{}(true) - - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } -} diff --git a/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl b/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl deleted file mode 100644 index a7d8ef44c59e2..0000000000000 --- a/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" - -auto_auth { - method { - type = "aws" - config = { - role = "foobar" - } - } - - sink { - type = "file" - config = { - path = "/tmp/file-foo" - } - aad = "foobar" - dh_type = "curve25519" - dh_path = "/tmp/file-foo-dhpath" - } -} - -api_proxy { - use_auto_auth_token = true - enforce_consistency = "always" - when_inconsistent = "retry" -} - -cache { - persist "kubernetes" { - path = "/vault/agent-cache/" - keep_after_import = true - exit_on_err = true - service_account_token_file = "/tmp/serviceaccount/token" - } -} - -listener { - type = "unix" - address = "/path/to/socket" - tls_disable = true - socket_mode = "configmode" - socket_user = "configuser" - socket_group = "configgroup" -} - -listener { - type = "tcp" - address = "127.0.0.1:8300" - tls_disable = true -} - -listener { - type = "tcp" - address = "127.0.0.1:3000" - tls_disable = true - role = "metrics_only" -} - -listener { - type = "tcp" - role = "default" - address = "127.0.0.1:8400" - tls_key_file = "/path/to/cakey.pem" - tls_cert_file = "/path/to/cacert.pem" -} - -vault { - address = "http://127.0.0.1:1111" - ca_cert = "config_ca_cert" - ca_path = "config_ca_path" - tls_skip_verify = true - client_cert = "config_client_cert" - client_key = "config_client_key" -} diff --git a/command/proxy/config/test-fixtures/config-cache.hcl b/command/proxy/config/test-fixtures/config-cache.hcl deleted file mode 100644 index 
d770391fe5b09..0000000000000 --- a/command/proxy/config/test-fixtures/config-cache.hcl +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -pid_file = "./pidfile" - -auto_auth { - method { - type = "aws" - config = { - role = "foobar" - } - } - - sink { - type = "file" - config = { - path = "/tmp/file-foo" - } - aad = "foobar" - dh_type = "curve25519" - dh_path = "/tmp/file-foo-dhpath" - } -} - -api_proxy { - use_auto_auth_token = true - enforce_consistency = "always" - when_inconsistent = "retry" -} - -cache { - persist = { - type = "kubernetes" - path = "/vault/agent-cache/" - keep_after_import = true - exit_on_err = true - service_account_token_file = "/tmp/serviceaccount/token" - } -} - -listener "unix" { - address = "/path/to/socket" - tls_disable = true - socket_mode = "configmode" - socket_user = "configuser" - socket_group = "configgroup" -} - -listener "tcp" { - address = "127.0.0.1:8300" - tls_disable = true -} - -listener { - type = "tcp" - address = "127.0.0.1:3000" - tls_disable = true - role = "metrics_only" -} - -listener "tcp" { - role = "default" - address = "127.0.0.1:8400" - tls_key_file = "/path/to/cakey.pem" - tls_cert_file = "/path/to/cacert.pem" -} - -vault { - address = "http://127.0.0.1:1111" - ca_cert = "config_ca_cert" - ca_path = "config_ca_path" - tls_skip_verify = "true" - client_cert = "config_client_cert" - client_key = "config_client_key" -} diff --git a/command/proxy/test-fixtures/reload/reload_bar.key b/command/proxy/test-fixtures/reload/reload_bar.key deleted file mode 100644 index 10849fbe1d7f3..0000000000000 --- a/command/proxy/test-fixtures/reload/reload_bar.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju -Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj -7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl 
-/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz -q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7 -XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ -ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF -V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q -g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ -zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt -V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC -is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS -Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU -8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB -1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L -m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti -y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/ -XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z -kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7 -qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX -Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft -b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT -9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH -4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab -JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil ------END RSA PRIVATE KEY----- diff --git a/command/proxy/test-fixtures/reload/reload_bar.pem b/command/proxy/test-fixtures/reload/reload_bar.pem deleted file mode 100644 index a8217be5c7dfd..0000000000000 --- a/command/proxy/test-fixtures/reload/reload_bar.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL -BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw -MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG 
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon -mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm -MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy -uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e -e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c -NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw -gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F -7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl -vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL -BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw -SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP -UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC -a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q -W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj -RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY= ------END CERTIFICATE----- diff --git a/command/proxy/test-fixtures/reload/reload_ca.pem b/command/proxy/test-fixtures/reload/reload_ca.pem deleted file mode 100644 index 72a74440c4820..0000000000000 --- a/command/proxy/test-fixtures/reload/reload_ca.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL -BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw -MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k -JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM -SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+ -VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/ -9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad -KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD -VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb -U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG 
-A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9 -hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX -Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf -oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8 -Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a -mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895 -XRz2GCwCNyvW ------END CERTIFICATE----- diff --git a/command/proxy/test-fixtures/reload/reload_foo.key b/command/proxy/test-fixtures/reload/reload_foo.key deleted file mode 100644 index 86e6cce63e647..0000000000000 --- a/command/proxy/test-fixtures/reload/reload_foo.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i -ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX -xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A -A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc -gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g -Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV -I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io -yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds -a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey -szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX -Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU -02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK -BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ -LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa -69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L -M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1 -Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV -gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/ -p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X 
-PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/ -3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO -FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3 -bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT -jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa -5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P ------END RSA PRIVATE KEY----- diff --git a/command/proxy/test-fixtures/reload/reload_foo.pem b/command/proxy/test-fixtures/reload/reload_foo.pem deleted file mode 100644 index c8b868bcd0f08..0000000000000 --- a/command/proxy/test-fixtures/reload/reload_foo.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL -BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw -MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D -j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1 -bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6 -EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58 -sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l -8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw -gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ -dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl -vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL -BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy -fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc -sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh -RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2 -oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene -Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE= ------END CERTIFICATE----- diff --git a/command/proxy_test.go b/command/proxy_test.go deleted file mode 100644 index 
01aba59dbc8f3..0000000000000 --- a/command/proxy_test.go +++ /dev/null @@ -1,1254 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "net/http" - "os" - "path/filepath" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/hashicorp/go-hclog" - vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" - logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" - "github.com/hashicorp/vault/api" - credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agent" - proxyConfig "github.com/hashicorp/vault/command/proxy/config" - "github.com/hashicorp/vault/helper/testhelpers/minimal" - "github.com/hashicorp/vault/helper/useragent" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func testProxyCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *ProxyCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &ProxyCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), - logger: logger, - startedCh: make(chan struct{}, 5), - reloadedCh: make(chan struct{}, 5), - } -} - -// TestProxy_ExitAfterAuth tests the exit_after_auth flag, provided both -// as config and via -exit-after-auth. 
-func TestProxy_ExitAfterAuth(t *testing.T) { - t.Run("via_config", func(t *testing.T) { - testProxyExitAfterAuth(t, false) - }) - - t.Run("via_flag", func(t *testing.T) { - testProxyExitAfterAuth(t, true) - }) -} - -func testProxyExitAfterAuth(t *testing.T, viaFlag bool) { - logger := logging.NewVaultLogger(hclog.Trace) - coreConfig := &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "jwt": vaultjwt.Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - client := cluster.Cores[0].Client - - // Setup Vault - err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ - Type: "jwt", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ - "bound_issuer": "https://team-vault.auth0.com/", - "jwt_validation_pubkeys": agent.TestECDSAPubKey, - "jwt_supported_algs": "ES256", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ - "role_type": "jwt", - "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", - "bound_audiences": "https://vault.plugin.auth.jwt.test", - "user_claim": "https://vault/user", - "groups_claim": "https://vault/groups", - "policies": "test", - "period": "3s", - }) - if err != nil { - t.Fatal(err) - } - - dir := t.TempDir() - inf, err := os.CreateTemp(dir, "auth.jwt.test.") - if err != nil { - t.Fatal(err) - } - in := inf.Name() - inf.Close() - // We remove these files in this test since we don't need the files, we just need - // a non-conflicting file name for the config. 
- os.Remove(in) - t.Logf("input: %s", in) - - sink1f, err := os.CreateTemp(dir, "sink1.jwt.test.") - if err != nil { - t.Fatal(err) - } - sink1 := sink1f.Name() - sink1f.Close() - os.Remove(sink1) - t.Logf("sink1: %s", sink1) - - sink2f, err := os.CreateTemp(dir, "sink2.jwt.test.") - if err != nil { - t.Fatal(err) - } - sink2 := sink2f.Name() - sink2f.Close() - os.Remove(sink2) - t.Logf("sink2: %s", sink2) - - conff, err := os.CreateTemp(dir, "conf.jwt.test.") - if err != nil { - t.Fatal(err) - } - conf := conff.Name() - conff.Close() - os.Remove(conf) - t.Logf("config: %s", conf) - - jwtToken, _ := agent.GetTestJWT(t) - if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { - t.Fatal(err) - } else { - logger.Trace("wrote test jwt", "path", in) - } - - exitAfterAuthTemplText := "exit_after_auth = true" - if viaFlag { - exitAfterAuthTemplText = "" - } - - config := ` -%s - -auto_auth { - method { - type = "jwt" - config = { - role = "test" - path = "%s" - } - } - - sink { - type = "file" - config = { - path = "%s" - } - } - - sink "file" { - config = { - path = "%s" - } - } -} -` - - config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) - if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { - t.Fatal(err) - } else { - logger.Trace("wrote test config", "path", conf) - } - - doneCh := make(chan struct{}) - go func() { - ui, cmd := testProxyCommand(t, logger) - cmd.client = client - - args := []string{"-config", conf} - if viaFlag { - args = append(args, "-exit-after-auth") - } - - code := cmd.Run(args) - if code != 0 { - t.Errorf("expected %d to be %d", code, 0) - t.Logf("output from proxy:\n%s", ui.OutputWriter.String()) - t.Logf("error from proxy:\n%s", ui.ErrorWriter.String()) - } - close(doneCh) - }() - - select { - case <-doneCh: - break - case <-time.After(1 * time.Minute): - t.Fatal("timeout reached while waiting for proxy to exit") - } - - sink1Bytes, err := os.ReadFile(sink1) - if err != nil { - t.Fatal(err) - } - if 
len(sink1Bytes) == 0 { - t.Fatal("got no output from sink 1") - } - - sink2Bytes, err := os.ReadFile(sink2) - if err != nil { - t.Fatal(err) - } - if len(sink2Bytes) == 0 { - t.Fatal("got no output from sink 2") - } - - if string(sink1Bytes) != string(sink2Bytes) { - t.Fatal("sink 1/2 values don't match") - } -} - -// TestProxy_AutoAuth_UserAgent tests that the User-Agent sent -// to Vault by Vault Proxy is correct when performing Auto-Auth. -// Uses the custom handler userAgentHandler (defined above) so -// that Vault validates the User-Agent on requests sent by Proxy. -func TestProxy_AutoAuth_UserAgent(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - var h userAgentHandler - cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - }, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.ProxyAutoAuthString() - h.requestMethodToCheck = "PUT" - h.pathToCheck = "auth/approle/login" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Enable the approle auth method - req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") - req.BodyBytes = []byte(`{ - "type": "approle" - }`) - request(t, serverClient, req, 204) - - // Create a named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") - req.BodyBytes = []byte(`{ - "secret_id_num_uses": "10", - "secret_id_ttl": "1m", - "token_max_ttl": "1m", - "token_num_uses": "10", - "token_ttl": "1m", - "policies": "default" - }`) - request(t, serverClient, req, 204) - - // Fetch the RoleID of the named role - req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") - body := request(t, serverClient, req, 200) - data := 
body["data"].(map[string]interface{}) - roleID := data["role_id"].(string) - - // Get a SecretID issued against the named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") - body = request(t, serverClient, req, 200) - data = body["data"].(map[string]interface{}) - secretID := data["secret_id"].(string) - - // Write the RoleID and SecretID to temp files - roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") - secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - defer os.Remove(roleIDPath) - defer os.Remove(secretIDPath) - - sinkf, err := os.CreateTemp("", "sink.test.") - if err != nil { - t.Fatal(err) - } - sink := sinkf.Name() - sinkf.Close() - os.Remove(sink) - - autoAuthConfig := fmt.Sprintf(` -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - } - } - - sink "file" { - config = { - path = "%s" - } - } -}`, roleIDPath, secretIDPath, sink) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -api_proxy { - use_auto_auth_token = true -} -%s -%s -`, serverClient.Address(), listenConfig, autoAuthConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Unset the environment variable so that proxy picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - // Start proxy - _, cmd := testProxyCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - // Validate that the auto-auth token has been correctly 
attained - // and works for LookupSelf - conf := api.DefaultConfig() - conf.Address = "http://" + listenAddr - proxyClient, err := api.NewClient(conf) - if err != nil { - t.Fatalf("err: %s", err) - } - - proxyClient.SetToken("") - err = proxyClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - // Wait for the token to be sent to syncs and be available to be used - time.Sleep(5 * time.Second) - - req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") - body = request(t, proxyClient, req, 200) - - close(cmd.ShutdownCh) - wg.Wait() -} - -// TestProxy_APIProxyWithoutCache_UserAgent tests that the User-Agent sent -// to Vault by Vault Proxy is correct using the API proxy without -// the cache configured. Uses the custom handler -// userAgentHandler struct defined in this test package, so that Vault validates the -// User-Agent on requests sent by Proxy. -func TestProxy_APIProxyWithoutCache_UserAgent(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - userAgentForProxiedClient := "proxied-client" - var h userAgentHandler - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.ProxyStringWithProxiedUserAgent(userAgentForProxiedClient) - h.pathToCheck = "/v1/auth/token/lookup-self" - h.requestMethodToCheck = "GET" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that proxy picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - 
tls_skip_verify = true -} -%s -`, serverClient.Address(), listenConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the proxy - _, cmd := testProxyCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - proxyClient, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - proxyClient.AddHeader("User-Agent", userAgentForProxiedClient) - proxyClient.SetToken(serverClient.Token()) - proxyClient.SetMaxRetries(0) - err = proxyClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - _, err = proxyClient.Auth().Token().LookupSelf() - if err != nil { - t.Fatal(err) - } - - close(cmd.ShutdownCh) - wg.Wait() -} - -// TestProxy_APIProxyWithCache_UserAgent tests that the User-Agent sent -// to Vault by Vault Proxy is correct using the API proxy with -// the cache configured. Uses the custom handler -// userAgentHandler struct defined in this test package, so that Vault validates the -// User-Agent on requests sent by Proxy. 
-func TestProxy_APIProxyWithCache_UserAgent(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - userAgentForProxiedClient := "proxied-client" - var h userAgentHandler - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.ProxyStringWithProxiedUserAgent(userAgentForProxiedClient) - h.pathToCheck = "/v1/auth/token/lookup-self" - h.requestMethodToCheck = "GET" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that proxy picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - cacheConfig := ` -cache { -}` - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -%s -%s -`, serverClient.Address(), listenConfig, cacheConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the proxy - _, cmd := testProxyCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - proxyClient, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - proxyClient.AddHeader("User-Agent", userAgentForProxiedClient) - proxyClient.SetToken(serverClient.Token()) - proxyClient.SetMaxRetries(0) - err = proxyClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - _, err = proxyClient.Auth().Token().LookupSelf() - 
if err != nil { - t.Fatal(err) - } - - close(cmd.ShutdownCh) - wg.Wait() -} - -// TestProxy_Cache_DynamicSecret Tests that the cache successfully caches a dynamic secret -// going through the Proxy, -func TestProxy_Cache_DynamicSecret(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that proxy picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - cacheConfig := ` -cache { -} -` - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -%s -%s -`, serverClient.Address(), cacheConfig, listenConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start proxy - _, cmd := testProxyCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - proxyClient, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - proxyClient.SetToken(serverClient.Token()) - proxyClient.SetMaxRetries(0) - err = proxyClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - renewable := true - tokenCreateRequest := &api.TokenCreateRequest{ - Policies: []string{"default"}, - TTL: "30m", - Renewable: &renewable, - } - - // This was the simplest test I could find to trigger the caching behaviour, - // i.e. 
the most concise I could make the test that I can tell - // creating an orphan token returns Auth, is renewable, and isn't a token - // that's managed elsewhere (since it's an orphan) - secret, err := proxyClient.Auth().Token().CreateOrphan(tokenCreateRequest) - if err != nil { - t.Fatal(err) - } - if secret == nil || secret.Auth == nil { - t.Fatalf("secret not as expected: %v", secret) - } - - token := secret.Auth.ClientToken - - secret, err = proxyClient.Auth().Token().CreateOrphan(tokenCreateRequest) - if err != nil { - t.Fatal(err) - } - if secret == nil || secret.Auth == nil { - t.Fatalf("secret not as expected: %v", secret) - } - - token2 := secret.Auth.ClientToken - - if token != token2 { - t.Fatalf("token create response not cached when it should have been, as tokens differ") - } - - close(cmd.ShutdownCh) - wg.Wait() -} - -// TestProxy_ApiProxy_Retry Tests the retry functionalities of Vault Proxy's API Proxy -func TestProxy_ApiProxy_Retry(t *testing.T) { - //---------------------------------------------------- - // Start the server and proxy - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - var h handler - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc(func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that proxy picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - _, err := 
serverClient.Logical().Write("secret/foo", map[string]interface{}{ - "bar": "baz", - }) - if err != nil { - t.Fatal(err) - } - - intRef := func(i int) *int { - return &i - } - // start test cases here - testCases := map[string]struct { - retries *int - expectError bool - }{ - "none": { - retries: intRef(-1), - expectError: true, - }, - "one": { - retries: intRef(1), - expectError: true, - }, - "two": { - retries: intRef(2), - expectError: false, - }, - "missing": { - retries: nil, - expectError: false, - }, - "default": { - retries: intRef(0), - expectError: false, - }, - } - - for tcname, tc := range testCases { - t.Run(tcname, func(t *testing.T) { - h.failCount = 2 - - cacheConfig := ` -cache { -} -` - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - var retryConf string - if tc.retries != nil { - retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries) - } - - config := fmt.Sprintf(` -vault { - address = "%s" - %s - tls_skip_verify = true -} -%s -%s -`, serverClient.Address(), retryConf, cacheConfig, listenConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - _, cmd := testProxyCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - client.SetToken(serverClient.Token()) - client.SetMaxRetries(0) - err = client.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - secret, err := client.Logical().Read("secret/foo") - switch { - case (err != nil || secret == nil) && tc.expectError: - case (err == nil || secret != nil) && !tc.expectError: - default: - t.Fatalf("%s expectError=%v error=%v 
secret=%v", tcname, tc.expectError, err, secret) - } - if secret != nil && secret.Data["foo"] != nil { - val := secret.Data["foo"].(map[string]interface{}) - if !reflect.DeepEqual(val, map[string]interface{}{"bar": "baz"}) { - t.Fatalf("expected key 'foo' to yield bar=baz, got: %v", val) - } - } - time.Sleep(time.Second) - - close(cmd.ShutdownCh) - wg.Wait() - }) - } -} - -// TestProxy_Metrics tests that metrics are being properly reported. -func TestProxy_Metrics(t *testing.T) { - // Start a vault server - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Logger: logger, - }, - &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Create a config file - listenAddr := generateListenerAddress(t) - config := fmt.Sprintf(` -cache {} - -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - ui, cmd := testProxyCommand(t, logger) - cmd.client = serverClient - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - code := cmd.Run([]string{"-config", configPath}) - if code != 0 { - t.Errorf("non-zero return code when running proxy: %d", code) - t.Logf("STDOUT from proxy:\n%s", ui.OutputWriter.String()) - t.Logf("STDERR from proxy:\n%s", ui.ErrorWriter.String()) - } - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - // defer proxy shutdown - defer func() { - cmd.ShutdownCh <- struct{}{} - wg.Wait() - }() - - conf := api.DefaultConfig() - conf.Address = "http://" + listenAddr - proxyClient, err := api.NewClient(conf) - if err != nil { - t.Fatalf("err: %s", err) - } - - req := proxyClient.NewRequest("GET", "/proxy/v1/metrics") - body := request(t, 
proxyClient, req, 200) - keys := []string{} - for k := range body { - keys = append(keys, k) - } - require.ElementsMatch(t, keys, []string{ - "Counters", - "Samples", - "Timestamp", - "Gauges", - "Points", - }) -} - -// TestProxy_QuitAPI Tests the /proxy/v1/quit API that can be enabled for the proxy. -func TestProxy_QuitAPI(t *testing.T) { - cluster := minimal.NewTestSoloCluster(t, nil) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that proxy picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - err := os.Unsetenv(api.EnvVaultAddress) - if err != nil { - t.Fatal(err) - } - - listenAddr := generateListenerAddress(t) - listenAddr2 := generateListenerAddress(t) - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} - -listener "tcp" { - address = "%s" - tls_disable = true -} - -listener "tcp" { - address = "%s" - tls_disable = true - proxy_api { - enable_quit = true - } -} - -cache {} -`, serverClient.Address(), listenAddr, listenAddr2) - - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - _, cmd := testProxyCommand(t, nil) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - client.SetToken(serverClient.Token()) - client.SetMaxRetries(0) - err = client.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - // First try on listener 1 where the API should be disabled. 
- resp, err := client.RawRequest(client.NewRequest(http.MethodPost, "/proxy/v1/quit")) - if err == nil { - t.Fatalf("expected error") - } - if resp != nil && resp.StatusCode != http.StatusNotFound { - t.Fatalf("expected %d but got: %d", http.StatusNotFound, resp.StatusCode) - } - - // Now try on listener 2 where the quit API should be enabled. - err = client.SetAddress("http://" + listenAddr2) - if err != nil { - t.Fatal(err) - } - - _, err = client.RawRequest(client.NewRequest(http.MethodPost, "/proxy/v1/quit")) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - select { - case <-cmd.ShutdownCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - wg.Wait() -} - -// TestProxy_LogFile_CliOverridesConfig tests that the CLI values -// override the config for log files -func TestProxy_LogFile_CliOverridesConfig(t *testing.T) { - // Create basic config - configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) - cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) - if err != nil { - t.Fatal("Cannot load config to test update/merge", err) - } - - // Sanity check that the config value is the current value - assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile) - - // Initialize the command and parse any flags - cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} - f := cmd.Flags() - // Simulate the flag being specified - err = f.Parse([]string{"-log-file=/foo/bar/test.log"}) - if err != nil { - t.Fatal(err) - } - - // Update the config based on the inputs. 
- cmd.applyConfigOverrides(f, cfg) - - assert.NotEqual(t, "TMPDIR/juan.log", cfg.LogFile) - assert.NotEqual(t, "/squiggle/logs.txt", cfg.LogFile) - assert.Equal(t, "/foo/bar/test.log", cfg.LogFile) -} - -// TestProxy_LogFile_Config tests log file config when loaded from config -func TestProxy_LogFile_Config(t *testing.T) { - configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) - - cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) - if err != nil { - t.Fatal("Cannot load config to test update/merge", err) - } - - // Sanity check that the config value is the current value - assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "sanity check on log config failed") - assert.Equal(t, 2, cfg.LogRotateMaxFiles) - assert.Equal(t, 1048576, cfg.LogRotateBytes) - - // Parse the cli flags (but we pass in an empty slice) - cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} - f := cmd.Flags() - err = f.Parse([]string{}) - if err != nil { - t.Fatal(err) - } - - // Should change nothing... - cmd.applyConfigOverrides(f, cfg) - - assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "actual config check") - assert.Equal(t, 2, cfg.LogRotateMaxFiles) - assert.Equal(t, 1048576, cfg.LogRotateBytes) -} - -// TestProxy_Config_NewLogger_Default Tests defaults for log level and -// specifically cmd.newLogger() -func TestProxy_Config_NewLogger_Default(t *testing.T) { - cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} - cmd.config = proxyConfig.NewConfig() - logger, err := cmd.newLogger() - - assert.NoError(t, err) - assert.NotNil(t, logger) - assert.Equal(t, hclog.Info.String(), logger.GetLevel().String()) -} - -// TestProxy_Config_ReloadLogLevel Tests reloading updates the log -// level as expected. 
-func TestProxy_Config_ReloadLogLevel(t *testing.T) { - cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} - var err error - tempDir := t.TempDir() - - // Load an initial config - hcl := strings.ReplaceAll(BasicHclConfig, "TMPDIR", tempDir) - configFile := populateTempFile(t, "proxy-config.hcl", hcl) - cmd.config, err = proxyConfig.LoadConfigFile(configFile.Name()) - if err != nil { - t.Fatal("Cannot load config to test update/merge", err) - } - - // Tweak the loaded config to make sure we can put log files into a temp dir - // and systemd log attempts work fine, this would usually happen during Run. - cmd.logWriter = os.Stdout - cmd.logger, err = cmd.newLogger() - if err != nil { - t.Fatal("logger required for systemd log messages", err) - } - - // Sanity check - assert.Equal(t, "warn", cmd.config.LogLevel) - - // Load a new config - hcl = strings.ReplaceAll(BasicHclConfig2, "TMPDIR", tempDir) - configFile = populateTempFile(t, "proxy-config.hcl", hcl) - err = cmd.reloadConfig([]string{configFile.Name()}) - assert.NoError(t, err) - assert.Equal(t, "debug", cmd.config.LogLevel) -} - -// TestProxy_Config_ReloadTls Tests that the TLS certs for the listener are -// correctly reloaded. 
-func TestProxy_Config_ReloadTls(t *testing.T) { - var wg sync.WaitGroup - wd, err := os.Getwd() - if err != nil { - t.Fatal("unable to get current working directory") - } - workingDir := filepath.Join(wd, "/proxy/test-fixtures/reload") - fooCert := "reload_foo.pem" - fooKey := "reload_foo.key" - - barCert := "reload_bar.pem" - barKey := "reload_bar.key" - - reloadCert := "reload_cert.pem" - reloadKey := "reload_key.pem" - caPem := "reload_ca.pem" - - tempDir := t.TempDir() - - // Set up initial 'foo' certs - inBytes, err := os.ReadFile(filepath.Join(workingDir, fooCert)) - if err != nil { - t.Fatal("unable to read cert required for test", fooCert, err) - } - err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) - if err != nil { - t.Fatal("unable to write temp cert required for test", reloadCert, err) - } - - inBytes, err = os.ReadFile(filepath.Join(workingDir, fooKey)) - if err != nil { - t.Fatal("unable to read cert key required for test", fooKey, err) - } - err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) - if err != nil { - t.Fatal("unable to write temp cert key required for test", reloadKey, err) - } - - inBytes, err = os.ReadFile(filepath.Join(workingDir, caPem)) - if err != nil { - t.Fatal("unable to read CA pem required for test", caPem, err) - } - certPool := x509.NewCertPool() - ok := certPool.AppendCertsFromPEM(inBytes) - if !ok { - t.Fatal("not ok when appending CA cert") - } - - replacedHcl := strings.ReplaceAll(BasicHclConfig, "TMPDIR", tempDir) - configFile := populateTempFile(t, "proxy-config.hcl", replacedHcl) - - // Set up Proxy - logger := logging.NewVaultLogger(hclog.Trace) - ui, cmd := testProxyCommand(t, logger) - - wg.Add(1) - args := []string{"-config", configFile.Name()} - go func() { - if code := cmd.Run(args); code != 0 { - output := ui.ErrorWriter.String() + ui.OutputWriter.String() - t.Errorf("got a non-zero exit status: %s", output) - } - wg.Done() - }() - - testCertificateName := func(cn string) 
error { - conn, err := tls.Dial("tcp", "127.0.0.1:8100", &tls.Config{ - RootCAs: certPool, - }) - if err != nil { - return err - } - defer conn.Close() - if err = conn.Handshake(); err != nil { - return err - } - servName := conn.ConnectionState().PeerCertificates[0].Subject.CommonName - if servName != cn { - return fmt.Errorf("expected %s, got %s", cn, servName) - } - return nil - } - - // Start - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Fatalf("timeout") - } - - if err := testCertificateName("foo.example.com"); err != nil { - t.Fatalf("certificate name didn't check out: %s", err) - } - - // Swap out certs - inBytes, err = os.ReadFile(filepath.Join(workingDir, barCert)) - if err != nil { - t.Fatal("unable to read cert required for test", barCert, err) - } - err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) - if err != nil { - t.Fatal("unable to write temp cert required for test", reloadCert, err) - } - - inBytes, err = os.ReadFile(filepath.Join(workingDir, barKey)) - if err != nil { - t.Fatal("unable to read cert key required for test", barKey, err) - } - err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) - if err != nil { - t.Fatal("unable to write temp cert key required for test", reloadKey, err) - } - - // Reload - cmd.SighupCh <- struct{}{} - select { - case <-cmd.reloadedCh: - case <-time.After(5 * time.Second): - t.Fatalf("timeout") - } - - if err := testCertificateName("bar.example.com"); err != nil { - t.Fatalf("certificate name didn't check out: %s", err) - } - - // Shut down - cmd.ShutdownCh <- struct{}{} - - wg.Wait() -} diff --git a/command/read.go b/command/read.go index 17b85529e12df..b12eb3f60ae1e 100644 --- a/command/read.go +++ b/command/read.go @@ -1,10 +1,6 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( - "context" "fmt" "io" "os" @@ -63,7 +59,7 @@ func (c *ReadCommand) AutocompleteFlags() complete.Flags { func (c *ReadCommand) Run(args []string) int { f := c.Flags() - if err := f.Parse(args, ParseOptionAllowRawFormat(true)); err != nil { + if err := f.Parse(args); err != nil { c.UI.Error(err.Error()) return 1 } @@ -81,10 +77,6 @@ func (c *ReadCommand) Run(args []string) int { return 2 } - // client.ReadRaw* methods require a manual timeout override - ctx, cancel := context.WithTimeout(context.Background(), client.ClientTimeout()) - defer cancel() - // Pull our fake stdin if needed stdin := (io.Reader)(os.Stdin) if c.testStdin != nil { @@ -99,40 +91,19 @@ func (c *ReadCommand) Run(args []string) int { return 1 } - if Format(c.UI) != "raw" { - secret, err := client.Logical().ReadWithDataWithContext(ctx, path, data) - if err != nil { - c.UI.Error(fmt.Sprintf("Error reading %s: %s", path, err)) - return 2 - } - if secret == nil { - c.UI.Error(fmt.Sprintf("No value found at %s", path)) - return 2 - } - - if c.flagField != "" { - return PrintRawField(c.UI, secret, c.flagField) - } - - return OutputSecret(c.UI, secret) - } - - resp, err := client.Logical().ReadRawWithDataWithContext(ctx, path, data) + secret, err := client.Logical().ReadWithData(path, data) if err != nil { - c.UI.Error(fmt.Sprintf("Error reading: %s: %s", path, err)) + c.UI.Error(fmt.Sprintf("Error reading %s: %s", path, err)) return 2 } - if resp == nil || resp.Body == nil { + if secret == nil { c.UI.Error(fmt.Sprintf("No value found at %s", path)) return 2 } - defer resp.Body.Close() - contents, err := io.ReadAll(resp.Body) - if err != nil { - c.UI.Error(fmt.Sprintf("Error reading: %s: %s", path, err)) - return 2 + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) } - return OutputData(c.UI, contents) + return OutputSecret(c.UI, secret) } diff --git a/command/read_test.go b/command/read_test.go index 
fbe7ab414fa56..4a8ec877aa0f8 100644 --- a/command/read_test.go +++ b/command/read_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/rotate.go b/command/rotate.go index 7a174f34eb9c0..f366a6133b105 100644 --- a/command/rotate.go +++ b/command/rotate.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/rotate_test.go b/command/rotate_test.go index bfd48f7b026cb..37ac323405908 100644 --- a/command/rotate_test.go +++ b/command/rotate_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/secrets.go b/command/secrets.go index 320167226c20c..06e63bec281fa 100644 --- a/command/secrets.go +++ b/command/secrets.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/secrets_disable.go b/command/secrets_disable.go index 8d782a524577b..47a61c5fe094c 100644 --- a/command/secrets_disable.go +++ b/command/secrets_disable.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/secrets_disable_test.go b/command/secrets_disable_test.go index d7c7da713bd79..567c8956d6308 100644 --- a/command/secrets_disable_test.go +++ b/command/secrets_disable_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/secrets_enable.go b/command/secrets_enable.go index 39ce3bf1b8802..8be62953dca99 100644 --- a/command/secrets_enable.go +++ b/command/secrets_enable.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -11,6 +8,7 @@ import ( "time" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -171,7 +169,7 @@ func (c *SecretsEnableCommand) Flags() *FlagSets { f.StringVar(&StringVar{ Name: "plugin-name", Target: &c.flagPluginName, - Completion: c.PredictVaultPlugins(api.PluginTypeSecrets, api.PluginTypeDatabase), + Completion: c.PredictVaultPlugins(consts.PluginTypeSecrets, consts.PluginTypeDatabase), Usage: "Name of the secrets engine plugin. This plugin name must already " + "exist in Vault's plugin catalog.", }) diff --git a/command/secrets_enable_test.go b/command/secrets_enable_test.go index 93984b3c33dda..4e8a9d1fc9520 100644 --- a/command/secrets_enable_test.go +++ b/command/secrets_enable_test.go @@ -1,12 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( - "errors" "io/ioutil" - "os" "strings" "testing" @@ -221,10 +216,6 @@ func TestSecretsEnableCommand_Run(t *testing.T) { if f.Name() == "plugin" { continue } - if _, err := os.Stat("../builtin/logical/" + f.Name() + "/backend.go"); errors.Is(err, os.ErrNotExist) { - // Skip ext test packages (fake plugins without backends). - continue - } backends = append(backends, f.Name()) } } diff --git a/command/secrets_list.go b/command/secrets_list.go index 90a8fe8ed973c..998620f0964a4 100644 --- a/command/secrets_list.go +++ b/command/secrets_list.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/secrets_list_test.go b/command/secrets_list_test.go index 95b60e34071af..1aeee5bf67294 100644 --- a/command/secrets_list_test.go +++ b/command/secrets_list_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/secrets_move.go b/command/secrets_move.go index b74adcd6af7bd..458e3bbece7ad 100644 --- a/command/secrets_move.go +++ b/command/secrets_move.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/secrets_move_test.go b/command/secrets_move_test.go index 3aabaa179bbe3..153fbeb2cdc0b 100644 --- a/command/secrets_move_test.go +++ b/command/secrets_move_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/secrets_tune.go b/command/secrets_tune.go index 74753e29d333d..bf8fa3d593784 100644 --- a/command/secrets_tune.go +++ b/command/secrets_tune.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/secrets_tune_test.go b/command/secrets_tune_test.go index 25b8a7ce791fb..41c6bd2f6fd1d 100644 --- a/command/secrets_tune_test.go +++ b/command/secrets_tune_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -9,7 +6,8 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -152,7 +150,7 @@ func TestSecretsTuneCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Run("flags_all", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + pluginDir, cleanup := vault.MakeTestPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) @@ -181,7 +179,7 @@ func TestSecretsTuneCommand_Run(t *testing.T) { t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) } - _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "pki", api.PluginTypeSecrets) + _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "pki", consts.PluginTypeSecrets) code := cmd.Run([]string{ "-description", "new description", diff --git a/command/server.go b/command/server.go index 73b6659e906a3..69ffe9efc0f4e 100644 --- a/command/server.go +++ b/command/server.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -32,33 +29,29 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/gatedwriter" "github.com/hashicorp/go-secure-stdlib/mlock" - "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/vault/audit" config2 "github.com/hashicorp/vault/command/config" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/helper/constants" - "github.com/hashicorp/vault/helper/experiments" - loghelper "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/helper/testhelpers/teststorage" - "github.com/hashicorp/vault/helper/useragent" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/helper/strutil" - "github.com/hashicorp/vault/sdk/helper/testcluster" + "github.com/hashicorp/vault/sdk/helper/useragent" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/version" sr "github.com/hashicorp/vault/serviceregistration" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/hcp_link" vaultseal "github.com/hashicorp/vault/vault/seal" - "github.com/hashicorp/vault/version" "github.com/mitchellh/cli" "github.com/mitchellh/go-testing-interface" "github.com/pkg/errors" @@ -91,7 +84,6 @@ const ( type ServerCommand struct { *BaseCommand - logFlags logFlags AuditBackends map[string]audit.Factory CredentialBackends map[string]logical.Factory @@ -106,9 +98,9 @@ type ServerCommand struct { WaitGroup 
*sync.WaitGroup - logWriter io.Writer - logGate *gatedwriter.Writer - logger hclog.InterceptLogger + logOutput io.Writer + gatedWriter *gatedwriter.Writer + logger hclog.InterceptLogger cleanupGuard sync.Once @@ -120,9 +112,11 @@ type ServerCommand struct { allLoggers []hclog.Logger + // new stuff flagConfigs []string + flagLogLevel string + flagLogFormat string flagRecovery bool - flagExperiments []string flagDev bool flagDevTLS bool flagDevTLSCertDir string @@ -141,11 +135,12 @@ type ServerCommand struct { flagDevFourCluster bool flagDevTransactional bool flagDevAutoSeal bool - flagDevClusterJson string flagTestVerifyOnly bool + flagCombineLogs bool flagTestServerConfig bool flagDevConsul bool flagExitOnCoreShutdown bool + flagDiagnose string } func (c *ServerCommand) Synopsis() string { @@ -181,9 +176,6 @@ func (c *ServerCommand) Flags() *FlagSets { f := set.NewFlagSet("Command Options") - // Augment with the log flags - f.addLogFlags(&c.logFlags) - f.StringSliceVar(&StringSliceVar{ Name: "config", Target: &c.flagConfigs, @@ -198,6 +190,27 @@ func (c *ServerCommand) Flags() *FlagSets { ".hcl or .json are loaded.", }) + f.StringVar(&StringVar{ + Name: "log-level", + Target: &c.flagLogLevel, + Default: notSetValue, + EnvVar: "VAULT_LOG_LEVEL", + Completion: complete.PredictSet("trace", "debug", "info", "warn", "error"), + Usage: "Log verbosity level. Supported values (in order of detail) are " + + "\"trace\", \"debug\", \"info\", \"warn\", and \"error\".", + }) + + f.StringVar(&StringVar{ + Name: "log-format", + Target: &c.flagLogFormat, + Default: notSetValue, + // EnvVar can't be just "VAULT_LOG_FORMAT", because more than one env var name is supported + // for backwards compatibility reasons. + // See github.com/hashicorp/vault/sdk/helper/logging.ParseEnvLogFormat() + Completion: complete.PredictSet("standard", "json"), + Usage: `Log format. 
Supported values are "standard" and "json".`, + }) + f.BoolVar(&BoolVar{ Name: "exit-on-core-shutdown", Target: &c.flagExitOnCoreShutdown, @@ -212,16 +225,18 @@ func (c *ServerCommand) Flags() *FlagSets { "Using a recovery operation token, \"sys/raw\" API can be used to manipulate the storage.", }) - f.StringSliceVar(&StringSliceVar{ - Name: "experiment", - Target: &c.flagExperiments, - Completion: complete.PredictSet(experiments.ValidExperiments()...), - Usage: "Name of an experiment to enable. Experiments should NOT be used in production, and " + - "the associated APIs may have backwards incompatible changes between releases. This " + - "flag can be specified multiple times to specify multiple experiments. This can also be " + - fmt.Sprintf("specified via the %s environment variable as a comma-separated list. ", EnvVaultExperiments) + - "Valid experiments are: " + strings.Join(experiments.ValidExperiments(), ", "), - }) + // Disabled by default until functional + if os.Getenv(OperatorDiagnoseEnableEnv) != "" { + f.StringVar(&StringVar{ + Name: "diagnose", + Target: &c.flagDiagnose, + Default: notSetValue, + Usage: "Run diagnostics before starting Vault. Specify a filename to direct output to that file.", + }) + } else { + // Ensure diagnose is *not* run when feature flag is off. + c.flagDiagnose = notSetValue + } f = set.NewFlagSet("Dev Options") @@ -373,13 +388,14 @@ func (c *ServerCommand) Flags() *FlagSets { Hidden: true, }) - f.StringVar(&StringVar{ - Name: "dev-cluster-json", - Target: &c.flagDevClusterJson, - Usage: "File to write cluster definition to", + // TODO: should the below flags be public? + f.BoolVar(&BoolVar{ + Name: "combine-logs", + Target: &c.flagCombineLogs, + Default: false, + Hidden: true, }) - // TODO: should the below flags be public? 
f.BoolVar(&BoolVar{ Name: "test-verify-only", Target: &c.flagTestVerifyOnly, @@ -409,8 +425,8 @@ func (c *ServerCommand) AutocompleteFlags() complete.Flags { func (c *ServerCommand) flushLog() { c.logger.(hclog.OutputResettable).ResetOutputWithFlush(&hclog.LoggerOptions{ - Output: c.logWriter, - }, c.logGate) + Output: c.logOutput, + }, c.gatedWriter) } func (c *ServerCommand) parseConfig() (*server.Config, []configutil.ConfigError, error) { @@ -457,15 +473,20 @@ func (c *ServerCommand) runRecoveryMode() int { return 1 } - // Update the 'log' related aspects of shared config based on config/env var/cli - c.flags.applyLogConfigOverrides(config.SharedConfig) - l, err := c.configureLogging(config) + level, logLevelString, logLevelWasNotSet, logFormat, err := c.processLogLevelAndFormat(config) if err != nil { c.UI.Error(err.Error()) return 1 } - c.logger = l - c.allLoggers = append(c.allLoggers, l) + + c.logger = hclog.NewInterceptLogger(&hclog.LoggerOptions{ + Output: c.gatedWriter, + Level: level, + IndependentLevels: true, + // Note that if logFormat is either unspecified or standard, then + // the resulting logger's format will be standard. 
+ JSONFormat: logFormat == logging.JSONFormat, + }) // reporting Errors found in the config for _, cErr := range configErrors { @@ -475,6 +496,15 @@ func (c *ServerCommand) runRecoveryMode() int { // Ensure logging is flushed if initialization fails defer c.flushLog() + logLevelStr, err := c.adjustLogLevel(config, logLevelWasNotSet) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + if logLevelStr != "" { + logLevelString = logLevelStr + } + // create GRPC logger namedGRPCLogFaker := c.logger.Named("grpclogfaker") grpclog.SetLogger(&grpclogFaker{ @@ -519,7 +549,7 @@ func (c *ServerCommand) runRecoveryMode() int { infoKeys := make([]string, 0, 10) info := make(map[string]string) - info["log level"] = config.LogLevel + info["log level"] = logLevelString infoKeys = append(infoKeys, "log level") var barrierSeal vault.Seal @@ -548,7 +578,9 @@ func (c *ServerCommand) runRecoveryMode() int { info["Seal Type"] = sealType var seal vault.Seal - defaultSeal := vault.NewDefaultSeal(vaultseal.NewAccess(aeadwrapper.NewShamirWrapper())) + defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewShamirWrapper(), + }) sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType)) wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger) if sealConfigError != nil { @@ -561,7 +593,9 @@ func (c *ServerCommand) runRecoveryMode() int { if wrapper == nil { seal = defaultSeal } else { - seal, err = vault.NewAutoSeal(vaultseal.NewAccess(wrapper)) + seal, err = vault.NewAutoSeal(&vaultseal.Access{ + Wrapper: wrapper, + }) if err != nil { c.UI.Error(fmt.Sprintf("error creating auto seal: %v", err)) } @@ -580,7 +614,7 @@ func (c *ServerCommand) runRecoveryMode() int { Physical: backend, StorageType: config.Storage.Type, Seal: barrierSeal, - LogLevel: config.LogLevel, + LogLevel: logLevelString, Logger: c.logger, DisableMlock: config.DisableMlock, RecoveryMode: c.flagRecovery, @@ -612,7 +646,7 @@ func (c 
*ServerCommand) runRecoveryMode() int { // Initialize the listeners lns := make([]listenerutil.Listener, 0, len(config.Listeners)) for _, lnConfig := range config.Listeners { - ln, _, _, err := server.NewListener(lnConfig, c.logGate, c.UI) + ln, _, _, err := server.NewListener(lnConfig, c.gatedWriter, c.UI) if err != nil { c.UI.Error(fmt.Sprintf("Error initializing listener of type %s: %s", lnConfig.Type, err)) return 1 @@ -669,14 +703,8 @@ func (c *ServerCommand) runRecoveryMode() int { c.UI.Output("") - // Tests might not want to start a vault server and just want to verify - // the configuration. - if c.flagTestVerifyOnly { - return 0 - } - for _, ln := range lns { - handler := vaulthttp.Handler.Handler(&vault.HandlerProperties{ + handler := vaulthttp.Handler(&vault.HandlerProperties{ Core: core, ListenerConfig: ln.Config, DisablePrintableCheck: config.DisablePrintableCheck, @@ -714,7 +742,7 @@ func (c *ServerCommand) runRecoveryMode() int { c.UI.Warn("") } - if !c.logFlags.flagCombineLogs { + if !c.flagCombineLogs { c.UI.Output("==> Vault server started! 
Log data will stream in below:\n") } @@ -766,6 +794,82 @@ func logProxyEnvironmentVariables(logger hclog.Logger) { "https_proxy", cfgMap["https_proxy"], "no_proxy", cfgMap["no_proxy"]) } +func (c *ServerCommand) adjustLogLevel(config *server.Config, logLevelWasNotSet bool) (string, error) { + var logLevelString string + if config.LogLevel != "" && logLevelWasNotSet { + configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel)) + logLevelString = configLogLevel + switch configLogLevel { + case "trace": + c.logger.SetLevel(hclog.Trace) + case "debug": + c.logger.SetLevel(hclog.Debug) + case "notice", "info", "": + c.logger.SetLevel(hclog.Info) + case "warn", "warning": + c.logger.SetLevel(hclog.Warn) + case "err", "error": + c.logger.SetLevel(hclog.Error) + default: + return "", fmt.Errorf("unknown log level: %s", config.LogLevel) + } + } + return logLevelString, nil +} + +func (c *ServerCommand) processLogLevelAndFormat(config *server.Config) (hclog.Level, string, bool, logging.LogFormat, error) { + // Create a logger. We wrap it in a gated writer so that it doesn't + // start logging too early. 
+ c.logOutput = os.Stderr + if c.flagCombineLogs { + c.logOutput = os.Stdout + } + c.gatedWriter = gatedwriter.NewWriter(c.logOutput) + var level hclog.Level + var logLevelWasNotSet bool + logFormat := logging.UnspecifiedFormat + logLevelString := c.flagLogLevel + c.flagLogLevel = strings.ToLower(strings.TrimSpace(c.flagLogLevel)) + switch c.flagLogLevel { + case notSetValue, "": + logLevelWasNotSet = true + logLevelString = "info" + level = hclog.Info + case "trace": + level = hclog.Trace + case "debug": + level = hclog.Debug + case "notice", "info": + level = hclog.Info + case "warn", "warning": + level = hclog.Warn + case "err", "error": + level = hclog.Error + default: + return level, logLevelString, logLevelWasNotSet, logFormat, fmt.Errorf("unknown log level: %s", c.flagLogLevel) + } + + if c.flagLogFormat != notSetValue { + var err error + logFormat, err = logging.ParseLogFormat(c.flagLogFormat) + if err != nil { + return level, logLevelString, logLevelWasNotSet, logFormat, err + } + } + if logFormat == logging.UnspecifiedFormat { + logFormat = logging.ParseEnvLogFormat() + } + if logFormat == logging.UnspecifiedFormat { + var err error + logFormat, err = logging.ParseLogFormat(config.LogFormat) + if err != nil { + return level, logLevelString, logLevelWasNotSet, logFormat, err + } + } + + return level, logLevelString, logLevelWasNotSet, logFormat, nil +} + type quiescenceSink struct { t *time.Timer } @@ -857,7 +961,7 @@ func (c *ServerCommand) InitListeners(config *server.Config, disableClustering b var errMsg error for i, lnConfig := range config.Listeners { - ln, props, reloadFunc, err := server.NewListener(lnConfig, c.logGate, c.UI) + ln, props, reloadFunc, err := server.NewListener(lnConfig, c.gatedWriter, c.UI) if err != nil { errMsg = fmt.Errorf("Error initializing listener of type %s: %s", lnConfig.Type, err) return 1, nil, nil, errMsg @@ -930,69 +1034,6 @@ func (c *ServerCommand) InitListeners(config *server.Config, disableClustering b return 0, lns, 
clusterAddrs, nil } -func configureDevTLS(c *ServerCommand) (func(), *server.Config, string, error) { - var devStorageType string - - switch { - case c.flagDevConsul: - devStorageType = "consul" - case c.flagDevHA && c.flagDevTransactional: - devStorageType = "inmem_transactional_ha" - case !c.flagDevHA && c.flagDevTransactional: - devStorageType = "inmem_transactional" - case c.flagDevHA && !c.flagDevTransactional: - devStorageType = "inmem_ha" - default: - devStorageType = "inmem" - } - - var certDir string - var err error - var config *server.Config - var f func() - - if c.flagDevTLS { - if c.flagDevTLSCertDir != "" { - if _, err = os.Stat(c.flagDevTLSCertDir); err != nil { - return nil, nil, "", err - } - - certDir = c.flagDevTLSCertDir - } else { - if certDir, err = os.MkdirTemp("", "vault-tls"); err != nil { - return nil, nil, certDir, err - } - } - config, err = server.DevTLSConfig(devStorageType, certDir) - - f = func() { - if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename)); err != nil { - c.UI.Error(err.Error()) - } - - if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCertFilename)); err != nil { - c.UI.Error(err.Error()) - } - - if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevKeyFilename)); err != nil { - c.UI.Error(err.Error()) - } - - // Only delete temp directories we made. 
- if c.flagDevTLSCertDir == "" { - if err := os.Remove(certDir); err != nil { - c.UI.Error(err.Error()) - } - } - } - - } else { - config, err = server.DevConfig(devStorageType) - } - - return f, config, certDir, err -} - func (c *ServerCommand) Run(args []string) int { f := c.Flags() @@ -1001,13 +1042,6 @@ func (c *ServerCommand) Run(args []string) int { return 1 } - c.logGate = gatedwriter.NewWriter(os.Stderr) - c.logWriter = c.logGate - - if c.logFlags.flagCombineLogs { - c.logWriter = os.Stdout - } - if c.flagRecovery { return c.runRecoveryMode() } @@ -1031,13 +1065,85 @@ func (c *ServerCommand) Run(args []string) int { } } + if c.flagDiagnose != notSetValue { + if c.flagDev { + c.UI.Error("Cannot run diagnose on Vault in dev mode.") + return 1 + } + // TODO: add a file output flag to Diagnose + diagnose := &OperatorDiagnoseCommand{ + BaseCommand: c.BaseCommand, + flagDebug: false, + flagSkips: []string{}, + flagConfigs: c.flagConfigs, + } + diagnose.RunWithParsedFlags() + } + // Load the configuration var config *server.Config + var err error var certDir string if c.flagDev { - df, cfg, dir, err := configureDevTLS(c) - if df != nil { - defer df() + var devStorageType string + switch { + case c.flagDevConsul: + devStorageType = "consul" + case c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional_ha" + case !c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional" + case c.flagDevHA && !c.flagDevTransactional: + devStorageType = "inmem_ha" + default: + devStorageType = "inmem" + } + + if c.flagDevTLS { + if c.flagDevTLSCertDir != "" { + _, err := os.Stat(c.flagDevTLSCertDir) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + certDir = c.flagDevTLSCertDir + } else { + certDir, err = os.MkdirTemp("", "vault-tls") + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + } + config, err = server.DevTLSConfig(devStorageType, certDir) + + defer func() { + err := os.Remove(fmt.Sprintf("%s/%s", certDir, 
server.VaultDevCAFilename)) + if err != nil { + c.UI.Error(err.Error()) + } + + err = os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCertFilename)) + if err != nil { + c.UI.Error(err.Error()) + } + + err = os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevKeyFilename)) + if err != nil { + c.UI.Error(err.Error()) + } + + // Only delete temp directories we made. + if c.flagDevTLSCertDir == "" { + err = os.Remove(certDir) + if err != nil { + c.UI.Error(err.Error()) + } + } + }() + + } else { + config, err = server.DevConfig(devStorageType) } if err != nil { @@ -1045,9 +1151,6 @@ func (c *ServerCommand) Run(args []string) int { return 1 } - config = cfg - certDir = dir - if c.flagDevListenAddr != "" { config.Listeners[0].Address = c.flagDevListenAddr } @@ -1075,20 +1178,31 @@ func (c *ServerCommand) Run(args []string) int { return 1 } - f.applyLogConfigOverrides(config.SharedConfig) - - // Set 'trace' log level for the following 'dev' clusters - if c.flagDevThreeNode || c.flagDevFourCluster { - config.LogLevel = "trace" - } - - l, err := c.configureLogging(config) + level, logLevelString, logLevelWasNotSet, logFormat, err := c.processLogLevelAndFormat(config) if err != nil { c.UI.Error(err.Error()) return 1 } - c.logger = l - c.allLoggers = append(c.allLoggers, l) + + config.LogFormat = logFormat.String() + + if c.flagDevThreeNode || c.flagDevFourCluster { + c.logger = hclog.NewInterceptLogger(&hclog.LoggerOptions{ + Mutex: &sync.Mutex{}, + Output: c.gatedWriter, + Level: hclog.Trace, + IndependentLevels: true, + }) + } else { + c.logger = hclog.NewInterceptLogger(&hclog.LoggerOptions{ + Output: c.gatedWriter, + Level: level, + IndependentLevels: true, + // Note that if logFormat is either unspecified or standard, then + // the resulting logger's format will be standard. 
+ JSONFormat: logFormat == logging.JSONFormat, + }) + } // reporting Errors found in the config for _, cErr := range configErrors { @@ -1098,6 +1212,17 @@ func (c *ServerCommand) Run(args []string) int { // Ensure logging is flushed if initialization fails defer c.flushLog() + c.allLoggers = []hclog.Logger{c.logger} + + logLevelStr, err := c.adjustLogLevel(config, logLevelWasNotSet) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + if logLevelStr != "" { + logLevelString = logLevelStr + } + // create GRPC logger namedGRPCLogFaker := c.logger.Named("grpclogfaker") c.allLoggers = append(c.allLoggers, namedGRPCLogFaker) @@ -1131,10 +1256,14 @@ func (c *ServerCommand) Run(args []string) int { if envLicense := os.Getenv(EnvVaultLicense); envLicense != "" { config.License = envLicense } - - if err := server.ExperimentsFromEnvAndCLI(config, EnvVaultExperiments, c.flagExperiments); err != nil { - c.UI.Error(err.Error()) - return 1 + if disableSSC := os.Getenv(DisableSSCTokens); disableSSC != "" { + var err error + config.DisableSSCTokens, err = strconv.ParseBool(disableSSC) + if err != nil { + c.UI.Warn(wrapAtLength("WARNING! failed to parse " + + "VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS env var: " + + "setting to default value false")) + } } // If mlockall(2) isn't supported, show a warning. 
We disable this in dev @@ -1164,18 +1293,16 @@ func (c *ServerCommand) Run(args []string) int { metricsHelper := metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) // Initialize the storage backend - var backend physical.Backend - if !c.flagDev || config.Storage != nil { - backend, err = c.setupStorage(config) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - // Prevent server startup if migration is active - // TODO: Use OpenTelemetry to integrate this into Diagnose - if c.storageMigrationActive(backend) { - return 1 - } + backend, err := c.setupStorage(config) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Prevent server startup if migration is active + // TODO: Use OpenTelemetry to integrate this into Diagnose + if c.storageMigrationActive(backend) { + return 1 } // Initialize the Service Discovery, if there is one @@ -1190,7 +1317,7 @@ func (c *ServerCommand) Run(args []string) int { infoKeys := make([]string, 0, 10) info := make(map[string]string) - info["log level"] = config.LogLevel + info["log level"] = logLevelString infoKeys = append(infoKeys, "log level") // returns a slice of env vars formatted as "key=value" @@ -1207,12 +1334,6 @@ func (c *ServerCommand) Run(args []string) int { info[key] = strings.Join(envVarKeys, ", ") infoKeys = append(infoKeys, key) - if len(config.Experiments) != 0 { - expKey := "experiments" - info[expKey] = strings.Join(config.Experiments, ", ") - infoKeys = append(infoKeys, expKey) - } - barrierSeal, barrierWrapper, unwrapSeal, seals, sealConfigError, err := setSeal(c, config, infoKeys, info) // Check error here if err != nil { @@ -1220,20 +1341,16 @@ func (c *ServerCommand) Run(args []string) int { return 1 } - for _, seal := range seals { - // There is always one nil seal. We need to skip it so we don't start an empty Finalize-Seal-Shamir - // section. 
- if seal == nil { - continue + if seals != nil { + for _, seal := range seals { + // Ensure that the seal finalizer is called, even if using verify-only + defer func(seal *vault.Seal) { + err = (*seal).Finalize(context.Background()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) + } + }(&seal) } - seal := seal // capture range variable - // Ensure that the seal finalizer is called, even if using verify-only - defer func(seal *vault.Seal) { - err = (*seal).Finalize(context.Background()) - if err != nil { - c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) - } - }(&seal) } if barrierSeal == nil { @@ -1417,17 +1534,6 @@ func (c *ServerCommand) Run(args []string) int { info["fips"] = fipsStatus } - if config.HCPLinkConf != nil { - infoKeys = append(infoKeys, "HCP organization") - info["HCP organization"] = config.HCPLinkConf.Resource.Organization - - infoKeys = append(infoKeys, "HCP project") - info["HCP project"] = config.HCPLinkConf.Resource.Project - - infoKeys = append(infoKeys, "HCP resource ID") - info["HCP resource ID"] = config.HCPLinkConf.Resource.ID - } - sort.Strings(infoKeys) c.UI.Output("==> Vault server configuration:\n") @@ -1449,7 +1555,7 @@ func (c *ServerCommand) Run(args []string) int { // This needs to happen before we first unseal, so before we trigger dev // mode if it's set core.SetClusterListenerAddrs(clusterAddrs) - core.SetClusterHandler(vaulthttp.Handler.Handler(&vault.HandlerProperties{ + core.SetClusterHandler(vaulthttp.Handler(&vault.HandlerProperties{ Core: core, })) @@ -1484,8 +1590,7 @@ func (c *ServerCommand) Run(args []string) int { } // If we're in Dev mode, then initialize the core - clusterJson := &testcluster.ClusterJson{} - err = initDevCore(c, &coreConfig, config, core, certDir, clusterJson) + err = initDevCore(c, &coreConfig, config, core, certDir) if err != nil { c.UI.Error(err.Error()) return 1 @@ -1498,12 +1603,12 @@ func (c *ServerCommand) Run(args []string) int { return 1 } - 
hcpLogger := c.logger.Named("hcp-connectivity") + hcpLogger := c.logger.Named("hcpLink") hcpLink, err := hcp_link.NewHCPLink(config.HCPLinkConf, core, hcpLogger) if err != nil { - c.logger.Error("failed to establish HCP connection", "error", err) + c.logger.Error("failed to start HCP Link", "error", err) } else if hcpLink != nil { - c.logger.Trace("established HCP connection") + c.logger.Trace("started HCP link") } if c.flagTestServerConfig { @@ -1523,7 +1628,7 @@ func (c *ServerCommand) Run(args []string) int { } // Output the header that the server has started - if !c.logFlags.flagCombineLogs { + if !c.flagCombineLogs { c.UI.Output("==> Vault server started! Log data will stream in below:\n") } @@ -1545,34 +1650,6 @@ func (c *ServerCommand) Run(args []string) int { // Notify systemd that the server is ready (if applicable) c.notifySystemd(systemd.SdNotifyReady) - if c.flagDev { - protocol := "http://" - if c.flagDevTLS { - protocol = "https://" - } - clusterJson.Nodes = []testcluster.ClusterNode{ - { - APIAddress: protocol + config.Listeners[0].Address, - }, - } - if c.flagDevTLS { - clusterJson.CACertPath = fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename) - } - - if c.flagDevClusterJson != "" && !c.flagDevThreeNode { - b, err := jsonutil.EncodeJSON(clusterJson) - if err != nil { - c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err)) - return 1 - } - err = os.WriteFile(c.flagDevClusterJson, b, 0o600) - if err != nil { - c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err)) - return 1 - } - } - } - defer func() { if err := c.removePidFile(config.PidFile); err != nil { c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) @@ -1605,6 +1682,7 @@ func (c *ServerCommand) Run(args []string) int { // Check for new log level var config *server.Config + var level hclog.Level var configErrors []configutil.ConfigError for _, path := range c.flagConfigs { current, err := server.LoadConfig(path) @@ -1650,10 
+1728,20 @@ func (c *ServerCommand) Run(args []string) int { c.logger.Error(err.Error()) } - // Reload log level for loggers if config.LogLevel != "" { - level, err := loghelper.ParseLogLevel(config.LogLevel) - if err != nil { + configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel)) + switch configLogLevel { + case "trace": + level = hclog.Trace + case "debug": + level = hclog.Debug + case "notice", "info", "": + level = hclog.Info + case "warn", "warning": + level = hclog.Warn + case "err", "error": + level = hclog.Error + default: c.logger.Error("unknown log level found on reload", "level", config.LogLevel) goto RUNRELOADFUNCS } @@ -1661,7 +1749,7 @@ func (c *ServerCommand) Run(args []string) int { } RUNRELOADFUNCS: - if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs, core); err != nil { + if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs); err != nil { c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) } @@ -1670,9 +1758,6 @@ func (c *ServerCommand) Run(args []string) int { c.UI.Error(err.Error()) } - if err := core.ReloadCensus(); err != nil { - c.UI.Error(err.Error()) - } select { case c.licenseReloadedCh <- err: default: @@ -1787,38 +1872,7 @@ func (c *ServerCommand) Run(args []string) int { return retCode } -// configureLogging takes the configuration and attempts to parse config values into 'log' friendly configuration values -// If all goes to plan, a logger is created and setup. 
-func (c *ServerCommand) configureLogging(config *server.Config) (hclog.InterceptLogger, error) { - // Parse all the log related config - logLevel, err := loghelper.ParseLogLevel(config.LogLevel) - if err != nil { - return nil, err - } - - logFormat, err := loghelper.ParseLogFormat(config.LogFormat) - if err != nil { - return nil, err - } - - logRotateDuration, err := parseutil.ParseDurationSecond(config.LogRotateDuration) - if err != nil { - return nil, err - } - - logCfg := &loghelper.LogConfig{ - LogLevel: logLevel, - LogFormat: logFormat, - LogFilePath: config.LogFile, - LogRotateDuration: logRotateDuration, - LogRotateBytes: config.LogRotateBytes, - LogRotateMaxFiles: config.LogRotateMaxFiles, - } - - return loghelper.Setup(logCfg, c.logWriter) -} - -func (c *ServerCommand) reloadHCPLink(hcpLinkVault *hcp_link.HCPLinkVault, conf *server.Config, core *vault.Core, hcpLogger hclog.Logger) (*hcp_link.HCPLinkVault, error) { +func (c *ServerCommand) reloadHCPLink(hcpLinkVault *hcp_link.WrappedHCPLinkVault, conf *server.Config, core *vault.Core, hcpLogger hclog.Logger) (*hcp_link.WrappedHCPLinkVault, error) { // trigger a shutdown if hcpLinkVault != nil { err := hcpLinkVault.Shutdown() @@ -2003,33 +2057,14 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig } func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int { - conf, opts := teststorage.ClusterSetup(base, &vault.TestClusterOptions{ + testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, BaseListenAddress: c.flagDevListenAddr, Logger: c.logger, TempDir: tempDir, - DefaultHandlerProperties: vault.HandlerProperties{ - ListenerConfig: &configutil.Listener{ - Profiling: configutil.ListenerProfiling{ - UnauthenticatedPProfAccess: true, - }, - Telemetry: configutil.ListenerTelemetry{ - UnauthenticatedMetricsAccess: true, 
- }, - }, - }, - }, nil) - testCluster := vault.NewTestCluster(&testing.RuntimeT{}, conf, opts) + }) defer c.cleanupGuard.Do(testCluster.Cleanup) - if constants.IsEnterprise { - err := testcluster.WaitForActiveNodeAndPerfStandbys(context.Background(), testCluster) - if err != nil { - c.UI.Error(fmt.Sprintf("perf standbys didn't become ready: %v", err)) - return 1 - } - } - info["cluster parameters path"] = testCluster.TempDir infoKeys = append(infoKeys, "cluster parameters path") @@ -2078,7 +2113,7 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m c.UI.Output("") for _, core := range testCluster.Cores { - core.Server.Handler = vaulthttp.Handler.Handler(&vault.HandlerProperties{ + core.Server.Handler = vaulthttp.Handler(&vault.HandlerProperties{ Core: core.Core, }) core.SetClusterHandler(core.Server.Handler) @@ -2170,29 +2205,6 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m testCluster.TempDir, )) - if c.flagDevClusterJson != "" { - clusterJson := testcluster.ClusterJson{ - Nodes: []testcluster.ClusterNode{}, - CACertPath: filepath.Join(testCluster.TempDir, "ca_cert.pem"), - RootToken: testCluster.RootToken, - } - for _, core := range testCluster.Cores { - clusterJson.Nodes = append(clusterJson.Nodes, testcluster.ClusterNode{ - APIAddress: core.Client.Address(), - }) - } - b, err := jsonutil.EncodeJSON(clusterJson) - if err != nil { - c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err)) - return 1 - } - err = os.WriteFile(c.flagDevClusterJson, b, 0o600) - if err != nil { - c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err)) - return 1 - } - } - // Output the header that the server has started c.UI.Output("==> Vault server started! 
Log data will stream in below:\n") @@ -2230,7 +2242,7 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m case <-c.SighupCh: c.UI.Output("==> Vault reload triggered") for _, core := range testCluster.Cores { - if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil, core.Core); err != nil { + if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil { c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) } } @@ -2348,7 +2360,7 @@ func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect, return url.String(), nil } -func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reloadutil.ReloadFunc, configPath []string, core *vault.Core) error { +func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reloadutil.ReloadFunc, configPath []string) error { lock.RLock() defer lock.RUnlock() @@ -2376,9 +2388,6 @@ func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]rel } } - // Set Introspection Endpoint to enabled with new value in the config after reload - core.ReloadIntrospectionEndpointEnabled() - // Send a message that we reloaded. This prevents "guessing" sleep times // in tests. 
select { @@ -2446,11 +2455,9 @@ func (c *ServerCommand) storageMigrationActive(backend physical.Backend) bool { } c.logger.Warn("storage migration check error", "error", err.Error()) - timer := time.NewTimer(2 * time.Second) select { - case <-timer.C: + case <-time.After(2 * time.Second): case <-c.ShutdownCh: - timer.Stop() return true } } @@ -2489,8 +2496,7 @@ func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info ma var barrierWrapper wrapping.Wrapper if c.flagDevAutoSeal { var err error - access, _ := vaultseal.NewTestSeal(nil) - barrierSeal, err = vault.NewAutoSeal(access) + barrierSeal, err = vault.NewAutoSeal(vaultseal.NewTestSeal(nil)) if err != nil { return nil, nil, nil, nil, nil, err } @@ -2521,7 +2527,9 @@ func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info ma var seal vault.Seal sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType)) c.allLoggers = append(c.allLoggers, sealLogger) - defaultSeal := vault.NewDefaultSeal(vaultseal.NewAccess(aeadwrapper.NewShamirWrapper())) + defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewShamirWrapper(), + }) var sealInfoKeys []string sealInfoMap := map[string]string{} wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &sealInfoKeys, &sealInfoMap, sealLogger) @@ -2535,7 +2543,9 @@ func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info ma seal = defaultSeal } else { var err error - seal, err = vault.NewAutoSeal(vaultseal.NewAccess(wrapper)) + seal, err = vault.NewAutoSeal(&vaultseal.Access{ + Wrapper: wrapper, + }) if err != nil { return nil, nil, nil, nil, nil, err } @@ -2735,12 +2745,10 @@ func runUnseal(c *ServerCommand, core *vault.Core, ctx context.Context) { } c.logger.Warn("failed to unseal core", "error", err) - timer := time.NewTimer(5 * time.Second) select { case <-c.ShutdownCh: - timer.Stop() return - case <-timer.C: + case <-time.After(5 * time.Second): } } } @@ -2761,7 +2769,6 @@ 
func createCoreConfig(c *ServerCommand, config *server.Config, backend physical. CredentialBackends: c.CredentialBackends, LogicalBackends: c.LogicalBackends, Logger: c.logger, - DetectDeadlocks: config.DetectDeadlocks, DisableSentinelTrace: config.DisableSentinelTrace, DisableCache: config.DisableCache, DisableMlock: config.DisableMlock, @@ -2774,7 +2781,6 @@ func createCoreConfig(c *ServerCommand, config *server.Config, backend physical. PluginFilePermissions: config.PluginFilePermissions, EnableUI: config.EnableUI, EnableRaw: config.EnableRawEndpoint, - EnableIntrospection: config.EnableIntrospectionEndpoint, DisableSealWrap: config.DisableSealWrap, DisablePerformanceStandby: config.DisablePerformanceStandby, DisableIndexing: config.DisableIndexing, @@ -2789,12 +2795,10 @@ func createCoreConfig(c *ServerCommand, config *server.Config, backend physical. License: config.License, LicensePath: config.LicensePath, DisableSSCTokens: config.DisableSSCTokens, - Experiments: config.Experiments, } if c.flagDev { coreConfig.EnableRaw = true - coreConfig.EnableIntrospection = true coreConfig.DevToken = c.flagDevRootTokenID if c.flagDevLeasedKV { coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory @@ -2823,7 +2827,7 @@ func runListeners(c *ServerCommand, coreConfig *vault.CoreConfig, config *server return nil } -func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core, certDir string, clusterJSON *testcluster.ClusterJson) error { +func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core, certDir string) error { if c.flagDev && !c.flagDevSkipInit { init, err := c.enableDev(core, coreConfig) @@ -2831,10 +2835,6 @@ func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server. 
return fmt.Errorf("Error initializing Dev mode: %s", err) } - if clusterJSON != nil { - clusterJSON.RootToken = init.RootToken - } - var plugins, pluginsNotLoaded []string if c.flagDevPluginDir != "" && c.flagDevPluginInit { @@ -2970,7 +2970,7 @@ func startHttpServers(c *ServerCommand, core *vault.Core, config *server.Config, return err } - handler := vaulthttp.Handler.Handler(&vault.HandlerProperties{ + handler := vaulthttp.Handler(&vault.HandlerProperties{ Core: core, ListenerConfig: ln.Config, DisablePrintableCheck: config.DisablePrintableCheck, diff --git a/command/server/config.go b/command/server/config.go index 34e4848004b72..d71a3ad236426 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package server import ( @@ -19,13 +16,9 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/vault/helper/experiments" "github.com/hashicorp/vault/helper/osutil" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/strutil" - "github.com/hashicorp/vault/sdk/helper/testcluster" - "github.com/mitchellh/mapstructure" ) const ( @@ -34,14 +27,9 @@ const ( VaultDevKeyFilename = "vault-key.pem" ) -var ( - entConfigValidate = func(_ *Config, _ string) []configutil.ConfigError { - return nil - } - - // Modified internally for testing. - validExperiments = experiments.ValidExperiments() -) +var entConfigValidate = func(_ *Config, _ string) []configutil.ConfigError { + return nil +} // Config is the configuration for the vault server. 
type Config struct { @@ -56,8 +44,6 @@ type Config struct { ServiceRegistration *ServiceRegistration `hcl:"-"` - Experiments []string `hcl:"experiments"` - CacheSize int `hcl:"cache_size"` DisableCache bool `hcl:"-"` DisableCacheRaw interface{} `hcl:"disable_cache"` @@ -81,9 +67,6 @@ type Config struct { PluginFilePermissions int `hcl:"-"` PluginFilePermissionsRaw interface{} `hcl:"plugin_file_permissions,alias:PluginFilePermissions"` - EnableIntrospectionEndpoint bool `hcl:"-"` - EnableIntrospectionEndpointRaw interface{} `hcl:"introspection_endpoint,alias:EnableIntrospectionEndpoint"` - EnableRawEndpoint bool `hcl:"-"` EnableRawEndpointRaw interface{} `hcl:"raw_storage_endpoint,alias:EnableRawEndpoint"` @@ -110,8 +93,6 @@ type Config struct { LogRequestsLevel string `hcl:"-"` LogRequestsLevelRaw interface{} `hcl:"log_requests_level"` - DetectDeadlocks string `hcl:"detect_deadlocks"` - EnableResponseHeaderRaftNodeID bool `hcl:"-"` EnableResponseHeaderRaftNodeIDRaw interface{} `hcl:"enable_response_header_raft_node_id"` @@ -343,11 +324,6 @@ func (c *Config) Merge(c2 *Config) *Config { result.EnableRawEndpoint = c2.EnableRawEndpoint } - result.EnableIntrospectionEndpoint = c.EnableIntrospectionEndpoint - if c2.EnableIntrospectionEndpoint { - result.EnableIntrospectionEndpoint = c2.EnableIntrospectionEndpoint - } - result.APIAddr = c.APIAddr if c2.APIAddr != "" { result.APIAddr = c2.APIAddr @@ -407,11 +383,6 @@ func (c *Config) Merge(c2 *Config) *Config { result.LogRequestsLevel = c2.LogRequestsLevel } - result.DetectDeadlocks = c.DetectDeadlocks - if c2.DetectDeadlocks != "" { - result.DetectDeadlocks = c2.DetectDeadlocks - } - result.EnableResponseHeaderRaftNodeID = c.EnableResponseHeaderRaftNodeID if c2.EnableResponseHeaderRaftNodeID { result.EnableResponseHeaderRaftNodeID = c2.EnableResponseHeaderRaftNodeID @@ -449,8 +420,6 @@ func (c *Config) Merge(c2 *Config) *Config { result.entConfig = c.entConfig.Merge(c2.entConfig) - result.Experiments = 
mergeExperiments(c.Experiments, c2.Experiments) - return result } @@ -617,12 +586,6 @@ func ParseConfig(d, source string) (*Config, error) { } } - if result.EnableIntrospectionEndpointRaw != nil { - if result.EnableIntrospectionEndpoint, err = parseutil.ParseBool(result.EnableIntrospectionEndpointRaw); err != nil { - return nil, err - } - } - if result.DisableClusteringRaw != nil { if result.DisableClustering, err = parseutil.ParseBool(result.DisableClusteringRaw); err != nil { return nil, err @@ -728,10 +691,6 @@ func ParseConfig(d, source string) (*Config, error) { } } - if err := validateExperiments(result.Experiments); err != nil { - return nil, fmt.Errorf("error validating experiment(s) from config: %w", err) - } - if err := result.parseConfig(list); err != nil { return nil, fmt.Errorf("error parsing enterprise config: %w", err) } @@ -748,69 +707,6 @@ func ParseConfig(d, source string) (*Config, error) { return result, nil } -func ExperimentsFromEnvAndCLI(config *Config, envKey string, flagExperiments []string) error { - if envExperimentsRaw := os.Getenv(envKey); envExperimentsRaw != "" { - envExperiments := strings.Split(envExperimentsRaw, ",") - err := validateExperiments(envExperiments) - if err != nil { - return fmt.Errorf("error validating experiment(s) from environment variable %q: %w", envKey, err) - } - - config.Experiments = mergeExperiments(config.Experiments, envExperiments) - } - - if len(flagExperiments) != 0 { - err := validateExperiments(flagExperiments) - if err != nil { - return fmt.Errorf("error validating experiment(s) from command line flag: %w", err) - } - - config.Experiments = mergeExperiments(config.Experiments, flagExperiments) - } - - return nil -} - -// Validate checks each experiment is a known experiment. 
-func validateExperiments(experiments []string) error { - var invalid []string - - for _, experiment := range experiments { - if !strutil.StrListContains(validExperiments, experiment) { - invalid = append(invalid, experiment) - } - } - - if len(invalid) != 0 { - return fmt.Errorf("valid experiment(s) are %s, but received the following invalid experiment(s): %s", - strings.Join(validExperiments, ", "), - strings.Join(invalid, ", ")) - } - - return nil -} - -// mergeExperiments returns the logical OR of the two sets. -func mergeExperiments(left, right []string) []string { - processed := map[string]struct{}{} - var result []string - for _, l := range left { - if _, seen := processed[l]; !seen { - result = append(result, l) - } - processed[l] = struct{}{} - } - - for _, r := range right { - if _, seen := processed[r]; !seen { - result = append(result, r) - processed[r] = struct{}{} - } - } - - return result -} - // LoadConfigDir loads all the configurations in the given directory // in alphabetical order. 
func LoadConfigDir(dir string) (*Config, error) { @@ -1111,8 +1007,6 @@ func (c *Config) Sanitized() map[string]interface{} { "raw_storage_endpoint": c.EnableRawEndpoint, - "introspection_endpoint": c.EnableIntrospectionEndpoint, - "api_addr": c.APIAddr, "cluster_addr": c.ClusterAddr, "disable_clustering": c.DisableClustering, @@ -1128,9 +1022,6 @@ func (c *Config) Sanitized() map[string]interface{} { "enable_response_header_raft_node_id": c.EnableResponseHeaderRaftNodeID, "log_requests_level": c.LogRequestsLevel, - "experiments": c.Experiments, - - "detect_deadlocks": c.DetectDeadlocks, } for k, v := range sharedResult { result[k] = v @@ -1209,12 +1100,3 @@ func (c *Config) found(s, k string) { delete(c.UnusedKeys, s) c.FoundKeys = append(c.FoundKeys, k) } - -func (c *Config) ToVaultNodeConfig() (*testcluster.VaultNodeConfig, error) { - var vnc testcluster.VaultNodeConfig - err := mapstructure.Decode(c, &vnc) - if err != nil { - return nil, err - } - return &vnc, nil -} diff --git a/command/server/config_custom_response_headers_test.go b/command/server/config_custom_response_headers_test.go index 11c4300b4160a..5380568c25107 100644 --- a/command/server/config_custom_response_headers_test.go +++ b/command/server/config_custom_response_headers_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package server import ( diff --git a/command/server/config_oss_test.go b/command/server/config_oss_test.go index 4a08ddf78ad03..f64670e03a556 100644 --- a/command/server/config_oss_test.go +++ b/command/server/config_oss_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !enterprise package server diff --git a/command/server/config_telemetry_test.go b/command/server/config_telemetry_test.go index 54245d05151a1..581710f565a7e 100644 --- a/command/server/config_telemetry_test.go +++ b/command/server/config_telemetry_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package server import ( diff --git a/command/server/config_test.go b/command/server/config_test.go index b570f1e766256..60a0910e13a6e 100644 --- a/command/server/config_test.go +++ b/command/server/config_test.go @@ -1,12 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package server import ( "fmt" - "reflect" - "strings" "testing" "github.com/stretchr/testify/require" @@ -44,10 +39,6 @@ func TestParseListeners(t *testing.T) { testParseListeners(t) } -func TestParseUserLockouts(t *testing.T) { - testParseUserLockouts(t) -} - func TestParseSockaddrTemplate(t *testing.T) { testParseSockaddrTemplate(t) } @@ -80,115 +71,6 @@ func TestUnknownFieldValidationListenerAndStorage(t *testing.T) { testUnknownFieldValidationStorageAndListener(t) } -func TestExperimentsConfigParsing(t *testing.T) { - const envKey = "VAULT_EXPERIMENTS" - originalValue := validExperiments - validExperiments = []string{"foo", "bar", "baz"} - t.Cleanup(func() { - validExperiments = originalValue - }) - - for name, tc := range map[string]struct { - fromConfig []string - fromEnv []string - fromCLI []string - expected []string - expectedError string - }{ - // Multiple sources. - "duplication": {[]string{"foo"}, []string{"foo"}, []string{"foo"}, []string{"foo"}, ""}, - "disjoint set": {[]string{"foo"}, []string{"bar"}, []string{"baz"}, []string{"foo", "bar", "baz"}, ""}, - - // Single source. - "config only": {[]string{"foo"}, nil, nil, []string{"foo"}, ""}, - "env only": {nil, []string{"foo"}, nil, []string{"foo"}, ""}, - "CLI only": {nil, nil, []string{"foo"}, []string{"foo"}, ""}, - - // Validation errors. 
- "config invalid": {[]string{"invalid"}, nil, nil, nil, "from config"}, - "env invalid": {nil, []string{"invalid"}, nil, nil, "from environment variable"}, - "CLI invalid": {nil, nil, []string{"invalid"}, nil, "from command line flag"}, - } { - t.Run(name, func(t *testing.T) { - var configString string - t.Setenv(envKey, strings.Join(tc.fromEnv, ",")) - if len(tc.fromConfig) != 0 { - configString = fmt.Sprintf("experiments = [\"%s\"]", strings.Join(tc.fromConfig, "\", \"")) - } - config, err := ParseConfig(configString, "") - if err == nil { - err = ExperimentsFromEnvAndCLI(config, envKey, tc.fromCLI) - } - - switch tc.expectedError { - case "": - if err != nil { - t.Fatal(err) - } - - default: - if err == nil || !strings.Contains(err.Error(), tc.expectedError) { - t.Fatalf("Expected error to contain %q, but got: %s", tc.expectedError, err) - } - } - }) - } -} - -func TestValidate(t *testing.T) { - originalValue := validExperiments - for name, tc := range map[string]struct { - validSet []string - input []string - expectError bool - }{ - // Valid cases - "minimal valid": {[]string{"foo"}, []string{"foo"}, false}, - "valid subset": {[]string{"foo", "bar"}, []string{"bar"}, false}, - "repeated": {[]string{"foo"}, []string{"foo", "foo"}, false}, - - // Error cases - "partially valid": {[]string{"foo", "bar"}, []string{"foo", "baz"}, true}, - "empty": {[]string{"foo"}, []string{""}, true}, - "no valid experiments": {[]string{}, []string{"foo"}, true}, - } { - t.Run(name, func(t *testing.T) { - t.Cleanup(func() { - validExperiments = originalValue - }) - - validExperiments = tc.validSet - err := validateExperiments(tc.input) - if tc.expectError && err == nil { - t.Fatal("Expected error but got none") - } - if !tc.expectError && err != nil { - t.Fatal("Did not expect error but got", err) - } - }) - } -} - -func TestMerge(t *testing.T) { - for name, tc := range map[string]struct { - left []string - right []string - expected []string - }{ - "disjoint": {[]string{"foo"}, 
[]string{"bar"}, []string{"foo", "bar"}}, - "empty left": {[]string{}, []string{"foo"}, []string{"foo"}}, - "empty right": {[]string{"foo"}, []string{}, []string{"foo"}}, - "overlapping": {[]string{"foo", "bar"}, []string{"foo", "baz"}, []string{"foo", "bar", "baz"}}, - } { - t.Run(name, func(t *testing.T) { - result := mergeExperiments(tc.left, tc.right) - if !reflect.DeepEqual(tc.expected, result) { - t.Fatalf("Expected %v but got %v", tc.expected, result) - } - }) - } -} - // Test_parseDevTLSConfig verifies that both Windows and Unix directories are correctly escaped when creating a dev TLS // configuration in HCL func Test_parseDevTLSConfig(t *testing.T) { diff --git a/command/server/config_test_helpers.go b/command/server/config_test_helpers.go index f5136449cef49..bb6e273a6a469 100644 --- a/command/server/config_test_helpers.go +++ b/command/server/config_test_helpers.go @@ -1,12 +1,8 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package server import ( "fmt" "reflect" - "sort" "strings" "testing" "time" @@ -455,9 +451,6 @@ func testLoadConfigFile(t *testing.T) { EnableRawEndpoint: true, EnableRawEndpointRaw: true, - EnableIntrospectionEndpoint: true, - EnableIntrospectionEndpointRaw: true, - DisableSealWrap: true, DisableSealWrapRaw: true, @@ -503,8 +496,8 @@ func testUnknownFieldValidation(t *testing.T) { Problem: "unknown or unsupported field bad_value found in configuration", Position: token.Pos{ Filename: "./test-fixtures/config.hcl", - Offset: 651, - Line: 37, + Offset: 583, + Line: 34, Column: 5, }, }, @@ -741,15 +734,12 @@ func testConfig_Sanitized(t *testing.T) { "disable_indexing": false, "disable_mlock": true, "disable_performance_standby": false, - "experiments": []string(nil), "plugin_file_uid": 0, "plugin_file_permissions": 0, "disable_printable_check": false, "disable_sealwrap": true, "raw_storage_endpoint": true, - "introspection_endpoint": false, "disable_sentinel_trace": true, - "detect_deadlocks": "", 
"enable_ui": true, "enable_response_header_hostname": false, "enable_response_header_raft_node_id": false, @@ -851,9 +841,6 @@ listener "tcp" { agent_api { enable_quit = true } - proxy_api { - enable_quit = true - } }`)) config := Config{ @@ -894,9 +881,6 @@ listener "tcp" { AgentAPI: &configutil.AgentAPI{ EnableQuit: true, }, - ProxyAPI: &configutil.ProxyAPI{ - EnableQuit: true, - }, CustomResponseHeaders: DefaultCustomHeaders, }, }, @@ -908,67 +892,6 @@ listener "tcp" { } } -func testParseUserLockouts(t *testing.T) { - obj, _ := hcl.Parse(strings.TrimSpace(` - user_lockout "all" { - lockout_duration = "40m" - lockout_counter_reset = "45m" - disable_lockout = "false" - } - user_lockout "userpass" { - lockout_threshold = "100" - lockout_duration = "20m" - } - user_lockout "ldap" { - disable_lockout = "true" - }`)) - - config := Config{ - SharedConfig: &configutil.SharedConfig{}, - } - list, _ := obj.Node.(*ast.ObjectList) - objList := list.Filter("user_lockout") - configutil.ParseUserLockouts(config.SharedConfig, objList) - - sort.Slice(config.SharedConfig.UserLockouts[:], func(i, j int) bool { - return config.SharedConfig.UserLockouts[i].Type < config.SharedConfig.UserLockouts[j].Type - }) - - expected := &Config{ - SharedConfig: &configutil.SharedConfig{ - UserLockouts: []*configutil.UserLockout{ - { - Type: "all", - LockoutThreshold: 5, - LockoutDuration: 2400000000000, - LockoutCounterReset: 2700000000000, - DisableLockout: false, - }, - { - Type: "userpass", - LockoutThreshold: 100, - LockoutDuration: 1200000000000, - LockoutCounterReset: 2700000000000, - DisableLockout: false, - }, - { - Type: "ldap", - LockoutThreshold: 5, - LockoutDuration: 2400000000000, - LockoutCounterReset: 2700000000000, - DisableLockout: true, - }, - }, - }, - } - - sort.Slice(expected.SharedConfig.UserLockouts[:], func(i, j int) bool { - return expected.SharedConfig.UserLockouts[i].Type < expected.SharedConfig.UserLockouts[j].Type - }) - config.Prune() - require.Equal(t, config, 
*expected) -} - func testParseSockaddrTemplate(t *testing.T) { config, err := ParseConfig(` api_addr = < 0, "test description %s", testcase.TestDescription) - require.Equal(t, testcase.TLSDisable, cfg.Listeners[0].TLSDisable, "test description %s", testcase.TestDescription) - } - require.Equal(t, testcase.CertPathEmpty, len(certPath) == 0, "test description %s", testcase.TestDescription) - require.Equal(t, testcase.ErrNotNil, (err != nil), "test description %s", testcase.TestDescription) - } -} diff --git a/command/server_util.go b/command/server_util.go index 7bf3196fc8d57..d5d9c8f4f3341 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/ssh.go b/command/ssh.go index 90bb03080118c..5ac2a3dd1484c 100644 --- a/command/ssh.go +++ b/command/ssh.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/ssh_test.go b/command/ssh_test.go index 137e541a8681f..3d2c8e0c886ef 100644 --- a/command/ssh_test.go +++ b/command/ssh_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/status.go b/command/status.go index 0b7c619742567..770adfcf3d48e 100644 --- a/command/status.go +++ b/command/status.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/status_test.go b/command/status_test.go index 5731945ae78f2..e34a72c578d10 100644 --- a/command/status_test.go +++ b/command/status_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/test-fixtures/config.hcl b/command/test-fixtures/config.hcl index 164acd29cc802..31de773909c9a 100644 --- a/command/test-fixtures/config.hcl +++ b/command/test-fixtures/config.hcl @@ -1,4 +1 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - token_helper = "foo" diff --git a/command/test-fixtures/policy.hcl b/command/test-fixtures/policy.hcl index 267fc5cecdc59..7d46bdeabe16f 100644 --- a/command/test-fixtures/policy.hcl +++ b/command/test-fixtures/policy.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - path "secret/foo" { policy = "write" } diff --git a/command/token.go b/command/token.go index 7b15275283bc1..20af230a5b308 100644 --- a/command/token.go +++ b/command/token.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token/helper.go b/command/token/helper.go index c8ce76326a7b7..ff559e40d447d 100644 --- a/command/token/helper.go +++ b/command/token/helper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package token // TokenHelper is an interface that contains basic operations that must be diff --git a/command/token/helper_external.go b/command/token/helper_external.go index 12557a4b30609..83f5f8907291d 100644 --- a/command/token/helper_external.go +++ b/command/token/helper_external.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package token import ( diff --git a/command/token/helper_external_test.go b/command/token/helper_external_test.go index d95c8890eaf34..b49dd93343ccd 100644 --- a/command/token/helper_external_test.go +++ b/command/token/helper_external_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package token import ( diff --git a/command/token/helper_internal.go b/command/token/helper_internal.go index aeb4faa9bef39..c5f35721ee9e2 100644 --- a/command/token/helper_internal.go +++ b/command/token/helper_internal.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package token import ( diff --git a/command/token/helper_internal_test.go b/command/token/helper_internal_test.go index e68359c820086..18f3abae56f6c 100644 --- a/command/token/helper_internal_test.go +++ b/command/token/helper_internal_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package token import ( diff --git a/command/token/helper_testing.go b/command/token/helper_testing.go index e95ff35580479..93465931b7890 100644 --- a/command/token/helper_testing.go +++ b/command/token/helper_testing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package token import ( diff --git a/command/token/testing.go b/command/token/testing.go index 85da0840c84ac..725f1276a052a 100644 --- a/command/token/testing.go +++ b/command/token/testing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package token import ( diff --git a/command/token_capabilities.go b/command/token_capabilities.go index f7e359c06f13f..093765630d664 100644 --- a/command/token_capabilities.go +++ b/command/token_capabilities.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token_capabilities_test.go b/command/token_capabilities_test.go index 14b0f27658341..874db49129af4 100644 --- a/command/token_capabilities_test.go +++ b/command/token_capabilities_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token_create.go b/command/token_create.go index 1efee5ebff062..a8dc2f03ea7a3 100644 --- a/command/token_create.go +++ b/command/token_create.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token_create_test.go b/command/token_create_test.go index 31bbd244b2e6b..a7e767926ae6a 100644 --- a/command/token_create_test.go +++ b/command/token_create_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token_lookup.go b/command/token_lookup.go index 24161399a2c9b..55284a29d1bc6 100644 --- a/command/token_lookup.go +++ b/command/token_lookup.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token_lookup_test.go b/command/token_lookup_test.go index 3f5636527661e..e027b3f7c97bf 100644 --- a/command/token_lookup_test.go +++ b/command/token_lookup_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token_renew.go b/command/token_renew.go index 7a61487b5a25e..88d6fa20fb2c2 100644 --- a/command/token_renew.go +++ b/command/token_renew.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token_renew_test.go b/command/token_renew_test.go index 29d9292018cf4..c958d4d55af67 100644 --- a/command/token_renew_test.go +++ b/command/token_renew_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token_revoke.go b/command/token_revoke.go index 48ccc27ac261b..f6eb72101bb37 100644 --- a/command/token_revoke.go +++ b/command/token_revoke.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/token_revoke_test.go b/command/token_revoke_test.go index 6ff8898301a18..7f66e9d4a0d88 100644 --- a/command/token_revoke_test.go +++ b/command/token_revoke_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/transform.go b/command/transform.go deleted file mode 100644 index 27345a441171c..0000000000000 --- a/command/transform.go +++ /dev/null @@ -1,41 +0,0 @@ -package command - -import ( - "strings" - - "github.com/mitchellh/cli" -) - -var _ cli.Command = (*TransformCommand)(nil) - -type TransformCommand struct { - *BaseCommand -} - -func (c *TransformCommand) Synopsis() string { - return "Interact with Vault's Transform Secrets Engine" -} - -func (c *TransformCommand) Help() string { - helpText := ` -Usage: vault transform [options] [args] - - This command has subcommands for interacting with Vault's Transform Secrets - Engine. Here are some simple examples, and more detailed examples are - available in the subcommands or the documentation. - - To import a key into a new FPE transformation: - - $ vault transform import transform/transformations/fpe/new-transformation @path/to/key \ - template=identifier \ - allowed_roles=physical-access - - Please see the individual subcommand help for detailed usage information. 
-` - - return strings.TrimSpace(helpText) -} - -func (c *TransformCommand) Run(args []string) int { - return cli.RunResultHelp -} diff --git a/command/transform_import_key.go b/command/transform_import_key.go deleted file mode 100644 index e41fa625ed052..0000000000000 --- a/command/transform_import_key.go +++ /dev/null @@ -1,76 +0,0 @@ -package command - -import ( - "errors" - "regexp" - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*TransformImportCommand)(nil) - _ cli.CommandAutocomplete = (*TransformImportCommand)(nil) - transformKeyPath = regexp.MustCompile("^(.*)/transformations/(fpe|tokenization)/([^/]*)$") -) - -type TransformImportCommand struct { - *BaseCommand -} - -func (c *TransformImportCommand) Synopsis() string { - return "Import a key into the Transform secrets engines." -} - -func (c *TransformImportCommand) Help() string { - helpText := ` -Usage: vault transform import PATH KEY [options...] - - Using the Transform key wrapping system, imports key material from - the base64 encoded KEY (either directly on the CLI or via @path notation), - into a new FPE or tokenization transformation whose API path is PATH. - - To import a new key version into an existing tokenization transformation, - use import_version. - - The remaining options after KEY (key=value style) are passed on to - Create/Update FPE Transformation or Create/Update Tokenization Transformation - API endpoints. 
- - For example: - $ vault transform import transform/transformations/tokenization/application-form @path/to/key \ - allowed_roles=legacy-system -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *TransformImportCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP) -} - -func (c *TransformImportCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *TransformImportCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *TransformImportCommand) Run(args []string) int { - return ImportKey(c.BaseCommand, "import", transformImportKeyPath, c.Flags(), args) -} - -func transformImportKeyPath(s string, operation string) (path string, apiPath string, err error) { - parts := transformKeyPath.FindStringSubmatch(s) - if len(parts) != 4 { - return "", "", errors.New("expected transform path and key name in the form :path:/transformations/fpe|tokenization/:name:") - } - path = parts[1] - transformation := parts[2] - keyName := parts[3] - apiPath = path + "/transformations/" + transformation + "/" + keyName + "/" + operation - - return path, apiPath, nil -} diff --git a/command/transform_import_key_version.go b/command/transform_import_key_version.go deleted file mode 100644 index 6ed8cb18bd02e..0000000000000 --- a/command/transform_import_key_version.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*TransformImportVersionCommand)(nil) - _ cli.CommandAutocomplete = (*TransformImportVersionCommand)(nil) -) - -type TransformImportVersionCommand struct { - *BaseCommand -} - -func (c *TransformImportVersionCommand) Synopsis() string { - return "Import key material into a new key version in the Transform secrets engines." 
-} - -func (c *TransformImportVersionCommand) Help() string { - helpText := ` -Usage: vault transform import-version PATH KEY [...] - - Using the Transform key wrapping system, imports new key material from - the base64 encoded KEY (either directly on the CLI or via @path notation), - into an existing tokenization transformation whose API path is PATH. - - The remaining options after KEY (key=value style) are passed on to - Create/Update Tokenization Transformation API endpoint. - - For example: - $ vault transform import-version transform/transformations/tokenization/application-form @path/to/new_version \ - allowed_roles=legacy-system -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *TransformImportVersionCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP) -} - -func (c *TransformImportVersionCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *TransformImportVersionCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *TransformImportVersionCommand) Run(args []string) int { - return ImportKey(c.BaseCommand, "import_version", transformImportKeyPath, c.Flags(), args) -} diff --git a/command/transit.go b/command/transit.go deleted file mode 100644 index a48fef7852638..0000000000000 --- a/command/transit.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "strings" - - "github.com/mitchellh/cli" -) - -var _ cli.Command = (*TransitCommand)(nil) - -type TransitCommand struct { - *BaseCommand -} - -func (c *TransitCommand) Synopsis() string { - return "Interact with Vault's Transit Secrets Engine" -} - -func (c *TransitCommand) Help() string { - helpText := ` -Usage: vault transit [options] [args] - - This command has subcommands for interacting with Vault's Transit Secrets - Engine. 
Here are some simple examples, and more detailed examples are - available in the subcommands or the documentation. - - To import a key into the specified Transit mount: - - $ vault transit import transit/keys/newly-imported @path/to/key type=rsa-2048 - - Please see the individual subcommand help for detailed usage information. -` - - return strings.TrimSpace(helpText) -} - -func (c *TransitCommand) Run(args []string) int { - return cli.RunResultHelp -} diff --git a/command/transit_import_key.go b/command/transit_import_key.go deleted file mode 100644 index 3eea70093bc1e..0000000000000 --- a/command/transit_import_key.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "os" - "regexp" - "strings" - - "github.com/hashicorp/vault/api" - - "github.com/google/tink/go/kwp/subtle" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*TransitImportCommand)(nil) - _ cli.CommandAutocomplete = (*TransitImportCommand)(nil) - keyPath = regexp.MustCompile("^(.*)/keys/([^/]*)$") -) - -type TransitImportCommand struct { - *BaseCommand -} - -func (c *TransitImportCommand) Synopsis() string { - return "Import a key into the Transit secrets engines." -} - -func (c *TransitImportCommand) Help() string { - helpText := ` -Usage: vault transit import PATH KEY [options...] - - Using the Transit key wrapping system, imports key material from - the base64 encoded KEY (either directly on the CLI or via @path notation), - into a new key whose API path is PATH. To import a new version into an - existing key, use import_version. The remaining options after KEY (key=value - style) are passed on to the Transit create key endpoint. 
If your - system or device natively supports the RSA AES key wrap mechanism (such as - the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it directly - rather than this command. - -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *TransitImportCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP) -} - -func (c *TransitImportCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *TransitImportCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *TransitImportCommand) Run(args []string) int { - return ImportKey(c.BaseCommand, "import", transitImportKeyPath, c.Flags(), args) -} - -func transitImportKeyPath(s string, operation string) (path string, apiPath string, err error) { - parts := keyPath.FindStringSubmatch(s) - if len(parts) != 3 { - return "", "", errors.New("expected transit path and key name in the form :path:/keys/:name:") - } - path = parts[1] - keyName := parts[2] - apiPath = path + "/keys/" + keyName + "/" + operation - - return path, apiPath, nil -} - -type ImportKeyFunc func(s string, operation string) (path string, apiPath string, err error) - -// error codes: 1: user error, 2: internal computation error, 3: remote api call error -func ImportKey(c *BaseCommand, operation string, pathFunc ImportKeyFunc, flags *FlagSets, args []string) int { - // Parse and validate the arguments. - if err := flags.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = flags.Args() - if len(args) < 2 { - c.UI.Error(fmt.Sprintf("Incorrect argument count (expected 2+, got %d). 
Wanted PATH to import into and KEY material.", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - ephemeralAESKey := make([]byte, 32) - _, err = rand.Read(ephemeralAESKey) - if err != nil { - c.UI.Error(fmt.Sprintf("failed to generate ephemeral key: %v", err)) - } - path, apiPath, err := pathFunc(args[0], operation) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - keyMaterial := args[1] - if keyMaterial[0] == '@' { - keyMaterialBytes, err := os.ReadFile(keyMaterial[1:]) - if err != nil { - c.UI.Error(fmt.Sprintf("error reading key material file: %v", err)) - return 1 - } - - keyMaterial = string(keyMaterialBytes) - } - - key, err := base64.StdEncoding.DecodeString(keyMaterial) - if err != nil { - c.UI.Error(fmt.Sprintf("error base64 decoding source key material: %v", err)) - return 1 - } - // Fetch the wrapping key - c.UI.Output("Retrieving wrapping key.") - wrappingKey, err := fetchWrappingKey(c, client, path) - if err != nil { - c.UI.Error(fmt.Sprintf("failed to fetch wrapping key: %v", err)) - return 3 - } - c.UI.Output("Wrapping source key with ephemeral key.") - wrapKWP, err := subtle.NewKWP(ephemeralAESKey) - if err != nil { - c.UI.Error(fmt.Sprintf("failure building key wrapping key: %v", err)) - return 2 - } - wrappedTargetKey, err := wrapKWP.Wrap(key) - if err != nil { - c.UI.Error(fmt.Sprintf("failure wrapping source key: %v", err)) - return 2 - } - c.UI.Output("Encrypting ephemeral key with wrapping key.") - wrappedAESKey, err := rsa.EncryptOAEP( - sha256.New(), - rand.Reader, - wrappingKey.(*rsa.PublicKey), - ephemeralAESKey, - []byte{}, - ) - if err != nil { - c.UI.Error(fmt.Sprintf("failure encrypting wrapped key: %v", err)) - return 2 - } - combinedCiphertext := append(wrappedAESKey, wrappedTargetKey...) 
- importCiphertext := base64.StdEncoding.EncodeToString(combinedCiphertext) - - // Parse all the key options - data, err := parseArgsData(os.Stdin, args[2:]) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to parse extra K=V data: %s", err)) - return 1 - } - if data == nil { - data = make(map[string]interface{}, 1) - } - - data["ciphertext"] = importCiphertext - - c.UI.Output("Submitting wrapped key.") - // Finally, call import - - _, err = client.Logical().Write(apiPath, data) - if err != nil { - c.UI.Error(fmt.Sprintf("failed to call import:%v", err)) - return 3 - } else { - c.UI.Output("Success!") - return 0 - } -} - -func fetchWrappingKey(c *BaseCommand, client *api.Client, path string) (any, error) { - resp, err := client.Logical().Read(path + "/wrapping_key") - if err != nil { - return nil, fmt.Errorf("error fetching wrapping key: %w", err) - } - if resp == nil { - return nil, fmt.Errorf("no mount found at %s: %v", path, err) - } - key, ok := resp.Data["public_key"] - if !ok { - c.UI.Error("could not find wrapping key") - } - keyBlock, _ := pem.Decode([]byte(key.(string))) - parsedKey, err := x509.ParsePKIXPublicKey(keyBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("error parsing wrapping key: %w", err) - } - return parsedKey, nil -} diff --git a/command/transit_import_key_test.go b/command/transit_import_key_test.go deleted file mode 100644 index e01c03fa00bb6..0000000000000 --- a/command/transit_import_key_test.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "bytes" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "testing" - - "github.com/hashicorp/vault/api" - - "github.com/stretchr/testify/require" -) - -// Validate the `vault transit import` command works. 
-func TestTransitImport(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().Mount("transit", &api.MountInput{ - Type: "transit", - }); err != nil { - t.Fatalf("transit mount error: %#v", err) - } - - rsa1, rsa2, aes128, aes256 := generateKeys(t) - - type testCase struct { - variant string - path string - key []byte - args []string - shouldFail bool - } - tests := []testCase{ - { - "import", - "transit/keys/rsa1", - rsa1, - []string{"type=rsa-2048"}, - false, /* first import */ - }, - { - "import", - "transit/keys/rsa1", - rsa2, - []string{"type=rsa-2048"}, - true, /* already exists */ - }, - { - "import-version", - "transit/keys/rsa1", - rsa2, - []string{"type=rsa-2048"}, - false, /* new version */ - }, - { - "import", - "transit/keys/rsa2", - rsa2, - []string{"type=rsa-4096"}, - true, /* wrong type */ - }, - { - "import", - "transit/keys/rsa2", - rsa2, - []string{"type=rsa-2048"}, - false, /* new name */ - }, - { - "import", - "transit/keys/aes1", - aes128, - []string{"type=aes128-gcm96"}, - false, /* first import */ - }, - { - "import", - "transit/keys/aes1", - aes256, - []string{"type=aes256-gcm96"}, - true, /* already exists */ - }, - { - "import-version", - "transit/keys/aes1", - aes256, - []string{"type=aes256-gcm96"}, - true, /* new version, different type */ - }, - { - "import-version", - "transit/keys/aes1", - aes128, - []string{"type=aes128-gcm96"}, - false, /* new version */ - }, - { - "import", - "transit/keys/aes2", - aes256, - []string{"type=aes128-gcm96"}, - true, /* wrong type */ - }, - { - "import", - "transit/keys/aes2", - aes256, - []string{"type=aes256-gcm96"}, - false, /* new name */ - }, - } - - for index, tc := range tests { - t.Logf("Running test case %d: %v", index, tc) - execTransitImport(t, client, tc.variant, tc.path, tc.key, tc.args, tc.shouldFail) - } -} - -func execTransitImport(t *testing.T, client *api.Client, method string, path string, key []byte, data []string, 
expectFailure bool) { - t.Helper() - - keyBase64 := base64.StdEncoding.EncodeToString(key) - - var args []string - args = append(args, "transit") - args = append(args, method) - args = append(args, path) - args = append(args, keyBase64) - args = append(args, data...) - - stdout := bytes.NewBuffer(nil) - stderr := bytes.NewBuffer(nil) - runOpts := &RunOptions{ - Stdout: stdout, - Stderr: stderr, - Client: client, - } - - code := RunCustom(args, runOpts) - combined := stdout.String() + stderr.String() - - if code != 0 { - if !expectFailure { - t.Fatalf("Got unexpected failure from test (ret %d): %v", code, combined) - } - } else { - if expectFailure { - t.Fatalf("Expected failure, got success from test (ret %d): %v", code, combined) - } - } -} - -func generateKeys(t *testing.T) (rsa1 []byte, rsa2 []byte, aes128 []byte, aes256 []byte) { - t.Helper() - - priv1, err := rsa.GenerateKey(rand.Reader, 2048) - require.NotNil(t, priv1, "failed generating RSA 1 key") - require.NoError(t, err, "failed generating RSA 1 key") - - rsa1, err = x509.MarshalPKCS8PrivateKey(priv1) - require.NotNil(t, rsa1, "failed marshaling RSA 1 key") - require.NoError(t, err, "failed marshaling RSA 1 key") - - priv2, err := rsa.GenerateKey(rand.Reader, 2048) - require.NotNil(t, priv2, "failed generating RSA 2 key") - require.NoError(t, err, "failed generating RSA 2 key") - - rsa2, err = x509.MarshalPKCS8PrivateKey(priv2) - require.NotNil(t, rsa2, "failed marshaling RSA 2 key") - require.NoError(t, err, "failed marshaling RSA 2 key") - - aes128 = make([]byte, 128/8) - _, err = rand.Read(aes128) - require.NoError(t, err, "failed generating AES 128 key") - - aes256 = make([]byte, 256/8) - _, err = rand.Read(aes256) - require.NoError(t, err, "failed generating AES 256 key") - - return -} diff --git a/command/transit_import_key_version.go b/command/transit_import_key_version.go deleted file mode 100644 index 1a25078923bb8..0000000000000 --- a/command/transit_import_key_version.go +++ /dev/null @@ -1,58 
+0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*TransitImportVersionCommand)(nil) - _ cli.CommandAutocomplete = (*TransitImportVersionCommand)(nil) -) - -type TransitImportVersionCommand struct { - *BaseCommand -} - -func (c *TransitImportVersionCommand) Synopsis() string { - return "Import key material into a new key version in the Transit secrets engines." -} - -func (c *TransitImportVersionCommand) Help() string { - helpText := ` -Usage: vault transit import-version PATH KEY [...] - - Using the Transit key wrapping system, imports key material from - the base64 encoded KEY (either directly on the CLI or via @path notation), - into a new key whose API path is PATH. To import a new Transit - key, use the import command instead. The remaining options after KEY - (key=value style) are passed on to the Transit create key endpoint. - If your system or device natively supports the RSA AES key wrap mechanism - (such as the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it - directly rather than this command. - -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *TransitImportVersionCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP) -} - -func (c *TransitImportVersionCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *TransitImportVersionCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *TransitImportVersionCommand) Run(args []string) int { - return ImportKey(c.BaseCommand, "import_version", transitImportKeyPath, c.Flags(), args) -} diff --git a/command/unwrap.go b/command/unwrap.go index 1f920e7806eb3..53ff0787de280 100644 --- a/command/unwrap.go +++ b/command/unwrap.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/unwrap_test.go b/command/unwrap_test.go index 608edff51778e..4a06418b027ce 100644 --- a/command/unwrap_test.go +++ b/command/unwrap_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/util.go b/command/util.go index e24d65d7259f1..4f9614e234fb3 100644 --- a/command/util.go +++ b/command/util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -107,7 +104,7 @@ func PrintRawField(ui cli.Ui, data interface{}, field string) int { } format := Format(ui) - if format == "" || format == "table" || format == "raw" { + if format == "" || format == "table" { return PrintRaw(ui, fmt.Sprintf("%v", val)) } diff --git a/command/version.go b/command/version.go index e9b17227b0bfe..ad366601b736c 100644 --- a/command/version.go +++ b/command/version.go @@ -1,12 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( "strings" - "github.com/hashicorp/vault/version" + "github.com/hashicorp/vault/sdk/version" "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/version_history.go b/command/version_history.go index b2dfbae425d9a..56d3deb963547 100644 --- a/command/version_history.go +++ b/command/version_history.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/command/version_history_test.go b/command/version_history_test.go index c011a4bf4d7b7..ba02626a40215 100644 --- a/command/version_history_test.go +++ b/command/version_history_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -9,7 +6,7 @@ import ( "strings" "testing" - "github.com/hashicorp/vault/version" + "github.com/hashicorp/vault/sdk/version" "github.com/mitchellh/cli" ) diff --git a/command/version_test.go b/command/version_test.go index ede21e62a9cf7..b952cef01c3ed 100644 --- a/command/version_test.go +++ b/command/version_test.go @@ -1,13 +1,10 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( "strings" "testing" - "github.com/hashicorp/vault/version" + "github.com/hashicorp/vault/sdk/version" "github.com/mitchellh/cli" ) diff --git a/command/write.go b/command/write.go index 2cc93b8a35049..15629052f7c2d 100644 --- a/command/write.go +++ b/command/write.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( @@ -9,7 +6,6 @@ import ( "os" "strings" - "github.com/hashicorp/vault/api" "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -142,10 +138,6 @@ func (c *WriteCommand) Run(args []string) int { } secret, err := client.Logical().Write(path, data) - return handleWriteSecretOutput(c.BaseCommand, path, secret, err) -} - -func handleWriteSecretOutput(c *BaseCommand, path string, secret *api.Secret, err error) int { if err != nil { c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) if secret != nil { diff --git a/command/write_test.go b/command/write_test.go index 9b76d391c8540..03aab4c79af2c 100644 --- a/command/write_test.go +++ b/command/write_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package command import ( diff --git a/enos/ci/aws-nuke.yml b/enos/ci/aws-nuke.yml index 50a567704925e..6089d0d30573d 100644 --- a/enos/ci/aws-nuke.yml +++ b/enos/ci/aws-nuke.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - regions: - eu-north-1 - ap-south-1 diff --git a/enos/ci/bootstrap/main.tf b/enos/ci/bootstrap/main.tf index c5ce812d90b24..804f1e66bfa4e 100644 --- a/enos/ci/bootstrap/main.tf +++ b/enos/ci/bootstrap/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { aws = { diff --git a/enos/ci/bootstrap/outputs.tf b/enos/ci/bootstrap/outputs.tf index e6ff37270bd54..858318e4cd5cf 100644 --- a/enos/ci/bootstrap/outputs.tf +++ b/enos/ci/bootstrap/outputs.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - output "keys" { value = { "us-east-1" = { diff --git a/enos/ci/bootstrap/variables.tf b/enos/ci/bootstrap/variables.tf index 3fb53bc517775..3aab3449f368d 100644 --- a/enos/ci/bootstrap/variables.tf +++ b/enos/ci/bootstrap/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "aws_ssh_public_key" { description = "The public key to use for the ssh key" type = string diff --git a/enos/ci/service-user-iam/main.tf b/enos/ci/service-user-iam/main.tf index c70dbaa13be50..bea2d46a43096 100644 --- a/enos/ci/service-user-iam/main.tf +++ b/enos/ci/service-user-iam/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { aws = { diff --git a/enos/ci/service-user-iam/outputs.tf b/enos/ci/service-user-iam/outputs.tf index ba980d59d076b..d4ba89910df9e 100644 --- a/enos/ci/service-user-iam/outputs.tf +++ b/enos/ci/service-user-iam/outputs.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - output "ci_role" { value = { name = aws_iam_role.role.name diff --git a/enos/ci/service-user-iam/providers.tf b/enos/ci/service-user-iam/providers.tf index 7baba3344006b..09c86d7bae4e2 100644 --- a/enos/ci/service-user-iam/providers.tf +++ b/enos/ci/service-user-iam/providers.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - provider "aws" { region = "us-east-1" alias = "us_east_1" diff --git a/enos/ci/service-user-iam/service-quotas.tf b/enos/ci/service-user-iam/service-quotas.tf index 3044d41778c42..544f311504e7d 100644 --- a/enos/ci/service-user-iam/service-quotas.tf +++ b/enos/ci/service-user-iam/service-quotas.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - locals { // This is the code of the service quota to request a change for. Each adjustable limit has a // unique code. See, https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/servicequotas_service_quota#quota_code @@ -9,35 +6,35 @@ locals { } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_1" { - provider = aws.us_east_1 + provider = aws.us_east_2 quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 100 + value = 50 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_2" { provider = aws.us_east_2 quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 100 + value = 50 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_1" { provider = aws.us_west_1 quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 100 + value = 50 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_2" { provider = aws.us_west_2 quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 100 + value = 50 } resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_1" { - provider = aws.us_east_1 + provider = aws.us_east_2 quota_code = 
local.standard_spot_instance_requests_quota service_code = "ec2" value = 640 diff --git a/enos/ci/service-user-iam/variables.tf b/enos/ci/service-user-iam/variables.tf index a80d83ca98c33..6cc7efd6bd9bc 100644 --- a/enos/ci/service-user-iam/variables.tf +++ b/enos/ci/service-user-iam/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "repository" { description = "The GitHub repository, either vault or vault-enterprise" type = string diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 0886b46a061b6..19116dd51ca09 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - module "autopilot_upgrade_storageconfig" { source = "./modules/autopilot_upgrade_storageconfig" } diff --git a/enos/enos-providers.hcl b/enos/enos-providers.hcl index f277c57e29f03..9301b55037d06 100644 --- a/enos/enos-providers.hcl +++ b/enos/enos-providers.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - provider "aws" "default" { region = var.aws_region } diff --git a/enos/enos-scenario-agent.hcl b/enos/enos-scenario-agent.hcl index dd10112ceacb5..d7b4cc3994f6c 100644 --- a/enos/enos-scenario-agent.hcl +++ b/enos/enos-scenario-agent.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - scenario "agent" { matrix { arch = ["amd64", "arm64"] diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl index 234451872acba..4e86348edf67b 100644 --- a/enos/enos-scenario-autopilot.hcl +++ b/enos/enos-scenario-autopilot.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - scenario "autopilot" { matrix { arch = ["amd64", "arm64"] diff --git a/enos/enos-scenario-replication.hcl b/enos/enos-scenario-replication.hcl index aebfd849b9ee4..4324c01ce2ceb 100644 --- a/enos/enos-scenario-replication.hcl +++ b/enos/enos-scenario-replication.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - // The replication scenario configures performance replication between two Vault clusters and verifies // known_primary_cluster_addrs are updated on secondary Vault cluster with the IP addresses of replaced // nodes on primary Vault cluster diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl index 3e5c313f02e63..0d2ebed5f581f 100644 --- a/enos/enos-scenario-smoke.hcl +++ b/enos/enos-scenario-smoke.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - scenario "smoke" { matrix { arch = ["amd64", "arm64"] diff --git a/enos/enos-scenario-ui.hcl b/enos/enos-scenario-ui.hcl index 9cd734a751a52..619cd73efc214 100644 --- a/enos/enos-scenario-ui.hcl +++ b/enos/enos-scenario-ui.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - scenario "ui" { matrix { edition = ["oss", "ent"] diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl index adc97dc584567..e6e719e800a14 100644 --- a/enos/enos-scenario-upgrade.hcl +++ b/enos/enos-scenario-upgrade.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - scenario "upgrade" { matrix { arch = ["amd64", "arm64"] diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl index 4048f5e181c2a..e45ccaa888f28 100644 --- a/enos/enos-terraform.hcl +++ b/enos/enos-terraform.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform_cli "default" { plugin_cache_dir = var.terraform_plugin_cache_dir != null ? 
abspath(var.terraform_plugin_cache_dir) : null diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index ef445d0a2b09a..1d0aea6ebd6b3 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "artifact_path" { type = string description = "The local path for dev artifact to test" diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl index d63af4452c806..d157c7b92650e 100644 --- a/enos/enos.vars.hcl +++ b/enos/enos.vars.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # aws_region is the AWS region where we'll create infrastructure # for the smoke scenario # aws_region = "us-west-1" diff --git a/enos/k8s/enos-modules-k8s.hcl b/enos/k8s/enos-modules-k8s.hcl index 76518403b750c..75bfd7856461d 100644 --- a/enos/k8s/enos-modules-k8s.hcl +++ b/enos/k8s/enos-modules-k8s.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - module "create_kind_cluster" { source = "../modules/local_kind_cluster" } diff --git a/enos/k8s/enos-providers-k8s.hcl b/enos/k8s/enos-providers-k8s.hcl index 7e3d7a7743157..2a0cf2be34841 100644 --- a/enos/k8s/enos-providers-k8s.hcl +++ b/enos/k8s/enos-providers-k8s.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - provider "enos" "default" {} provider "helm" "default" { diff --git a/enos/k8s/enos-scenario-k8s.hcl b/enos/k8s/enos-scenario-k8s.hcl index 8953d5c37e596..cc4769dd4f645 100644 --- a/enos/k8s/enos-scenario-k8s.hcl +++ b/enos/k8s/enos-scenario-k8s.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - scenario "k8s" { matrix { edition = ["oss", "ent"] diff --git a/enos/k8s/enos-terraform-k8s.hcl b/enos/k8s/enos-terraform-k8s.hcl index 5b68bcada61e8..a389153d24f25 100644 --- a/enos/k8s/enos-terraform-k8s.hcl +++ b/enos/k8s/enos-terraform-k8s.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform "k8s" { required_version = ">= 1.2.0" diff --git a/enos/k8s/enos-variables-k8s.hcl b/enos/k8s/enos-variables-k8s.hcl index 86bf9d5e3642b..fb8983f218429 100644 --- a/enos/k8s/enos-variables-k8s.hcl +++ b/enos/k8s/enos-variables-k8s.hcl @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "vault_image_repository" { description = "The repository for the docker image to load, i.e. hashicorp/vault" type = string diff --git a/enos/modules/autopilot_upgrade_storageconfig/main.tf b/enos/modules/autopilot_upgrade_storageconfig/main.tf index 68f47d19dd716..6093b8b1066de 100644 --- a/enos/modules/autopilot_upgrade_storageconfig/main.tf +++ b/enos/modules/autopilot_upgrade_storageconfig/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "vault_product_version" {} output "storage_addl_config" { diff --git a/enos/modules/az_finder/main.tf b/enos/modules/az_finder/main.tf index 3508ff0cc39c7..b55975578c61c 100644 --- a/enos/modules/az_finder/main.tf +++ b/enos/modules/az_finder/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { aws = { diff --git a/enos/modules/backend_raft/main.tf b/enos/modules/backend_raft/main.tf index 2e6afa215e9eb..4cb8e58a592bf 100644 --- a/enos/modules/backend_raft/main.tf +++ b/enos/modules/backend_raft/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - // Shim module to handle the fact that Vault doesn't actually need a backend module terraform { required_providers { diff --git a/enos/modules/build_crt/main.tf b/enos/modules/build_crt/main.tf index 1e125763a16d5..cffa44b17a001 100644 --- a/enos/modules/build_crt/main.tf +++ b/enos/modules/build_crt/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # Shim module since CRT provided things will use the crt_bundle_path variable variable "bundle_path" { default = "/tmp/vault.zip" diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf index a55850076ed3c..11f29585eca77 100644 --- a/enos/modules/build_local/main.tf +++ b/enos/modules/build_local/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/build_local/scripts/build.sh b/enos/modules/build_local/scripts/build.sh index 64f1bbd8d7d6e..385a3af557040 100755 --- a/enos/modules/build_local/scripts/build.sh +++ b/enos/modules/build_local/scripts/build.sh @@ -1,7 +1,4 @@ #!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -eux -o pipefail # Install yarn so we can build the UI diff --git a/enos/modules/generate_secondary_token/main.tf b/enos/modules/generate_secondary_token/main.tf index 49a4a15e24c0e..fbba304bd7cb4 100644 --- a/enos/modules/generate_secondary_token/main.tf +++ b/enos/modules/generate_secondary_token/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/get_local_metadata/main.tf b/enos/modules/get_local_metadata/main.tf index 1c5dc84be9cbc..fd33673470efe 100644 --- a/enos/modules/get_local_metadata/main.tf +++ b/enos/modules/get_local_metadata/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/get_local_metadata/scripts/build_date.sh b/enos/modules/get_local_metadata/scripts/build_date.sh index fc763fd4e91a9..917888eb1cbfa 100755 --- a/enos/modules/get_local_metadata/scripts/build_date.sh +++ b/enos/modules/get_local_metadata/scripts/build_date.sh @@ -1,7 +1,4 @@ #!/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -eu -o pipefail pushd "$(git rev-parse --show-toplevel)" > /dev/null diff --git a/enos/modules/get_local_metadata/scripts/version.sh b/enos/modules/get_local_metadata/scripts/version.sh index 74a561f11bd8e..6921d772ea467 100755 --- a/enos/modules/get_local_metadata/scripts/version.sh +++ b/enos/modules/get_local_metadata/scripts/version.sh @@ -1,7 +1,4 @@ #!/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -eu -o pipefail pushd "$(git rev-parse --show-toplevel)" > /dev/null diff --git a/enos/modules/get_local_version_from_make/main.tf b/enos/modules/get_local_version_from_make/main.tf new file mode 100644 index 0000000000000..a7dcf6be7f2ab --- /dev/null +++ b/enos/modules/get_local_version_from_make/main.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +resource "enos_local_exec" "get_version" { + scripts = ["${path.module}/scripts/version.sh"] +} + +output "version" { + value = trimspace(enos_local_exec.get_version.stdout) +} diff --git a/enos/modules/get_local_version_from_make/scripts/version.sh b/enos/modules/get_local_version_from_make/scripts/version.sh new file mode 100755 index 0000000000000..13970ead9752c --- /dev/null +++ b/enos/modules/get_local_version_from_make/scripts/version.sh @@ -0,0 +1,10 @@ +#!/bin/env bash +set -eu -o pipefail + +# Set up the environment for building Vault. 
+root_dir="$(git rev-parse --show-toplevel)" + +pushd "$root_dir" > /dev/null + +IFS="-" read -r VAULT_VERSION _other <<< "$(make version)" +echo $VAULT_VERSION diff --git a/enos/modules/k8s_deploy_vault/main.tf b/enos/modules/k8s_deploy_vault/main.tf index 72f4f4700354f..1878ebeb72596 100644 --- a/enos/modules/k8s_deploy_vault/main.tf +++ b/enos/modules/k8s_deploy_vault/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_version = ">= 1.0" diff --git a/enos/modules/k8s_deploy_vault/variables.tf b/enos/modules/k8s_deploy_vault/variables.tf index 55fa6f1da6b9a..e9867d7fa0fb7 100644 --- a/enos/modules/k8s_deploy_vault/variables.tf +++ b/enos/modules/k8s_deploy_vault/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "context_name" { type = string description = "The name of the k8s context for Vault" diff --git a/enos/modules/k8s_vault_verify_build_date/main.tf b/enos/modules/k8s_vault_verify_build_date/main.tf index 366497d08475c..38f17fbc9e797 100644 --- a/enos/modules/k8s_vault_verify_build_date/main.tf +++ b/enos/modules/k8s_vault_verify_build_date/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { diff --git a/enos/modules/k8s_vault_verify_build_date/variables.tf b/enos/modules/k8s_vault_verify_build_date/variables.tf index d960b7840e3e4..7bba75ba68d82 100644 --- a/enos/modules/k8s_vault_verify_build_date/variables.tf +++ b/enos/modules/k8s_vault_verify_build_date/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/k8s_vault_verify_replication/main.tf b/enos/modules/k8s_vault_verify_replication/main.tf index 27824dc7676b6..804b934591f1c 100644 --- a/enos/modules/k8s_vault_verify_replication/main.tf +++ b/enos/modules/k8s_vault_verify_replication/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { diff --git a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh index 5786502cbcdd2..363ce7185845b 100755 --- a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh +++ b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # The Vault replication smoke test, documented in # https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 diff --git a/enos/modules/k8s_vault_verify_replication/variables.tf b/enos/modules/k8s_vault_verify_replication/variables.tf index 7d4337801defa..42ab38aa572bb 100644 --- a/enos/modules/k8s_vault_verify_replication/variables.tf +++ b/enos/modules/k8s_vault_verify_replication/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/k8s_vault_verify_ui/main.tf b/enos/modules/k8s_vault_verify_ui/main.tf index ce5796096771f..faccb7085870d 100644 --- a/enos/modules/k8s_vault_verify_ui/main.tf +++ b/enos/modules/k8s_vault_verify_ui/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { diff --git a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh index f6b8a278dce72..b85d4da124737 100755 --- a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh +++ b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/k8s_vault_verify_ui/variables.tf b/enos/modules/k8s_vault_verify_ui/variables.tf index c39f24e1fa2e5..6c06d5de792ec 100644 --- a/enos/modules/k8s_vault_verify_ui/variables.tf +++ b/enos/modules/k8s_vault_verify_ui/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/k8s_vault_verify_version/main.tf b/enos/modules/k8s_vault_verify_version/main.tf index 8decaaa778489..693abf97910f2 100644 --- a/enos/modules/k8s_vault_verify_version/main.tf +++ b/enos/modules/k8s_vault_verify_version/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { diff --git a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh index a799ebc66f3fb..3d2d1fe97506a 100755 --- a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh +++ b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh @@ -1,7 +1,4 @@ #!/usr/bin/env sh -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh index 514969cf50985..d5c439a9b264c 100755 --- a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh +++ b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # The Vault smoke test to verify the Vault version installed diff --git a/enos/modules/k8s_vault_verify_version/variables.tf b/enos/modules/k8s_vault_verify_version/variables.tf index 58940a8551bda..ed487831a3c06 100644 --- a/enos/modules/k8s_vault_verify_version/variables.tf +++ b/enos/modules/k8s_vault_verify_version/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/k8s_vault_verify_write_data/main.tf b/enos/modules/k8s_vault_verify_write_data/main.tf index 5606b89883521..01caeaba4c158 100644 --- a/enos/modules/k8s_vault_verify_write_data/main.tf +++ b/enos/modules/k8s_vault_verify_write_data/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { diff --git a/enos/modules/k8s_vault_verify_write_data/variables.tf b/enos/modules/k8s_vault_verify_write_data/variables.tf index d960b7840e3e4..7bba75ba68d82 100644 --- a/enos/modules/k8s_vault_verify_write_data/variables.tf +++ b/enos/modules/k8s_vault_verify_write_data/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/load_docker_image/main.tf b/enos/modules/load_docker_image/main.tf index 4e5f293f90843..854c52f9ea690 100644 --- a/enos/modules/load_docker_image/main.tf +++ b/enos/modules/load_docker_image/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/local_kind_cluster/main.tf b/enos/modules/local_kind_cluster/main.tf index 5a351679b2e25..c5da14daf770a 100644 --- a/enos/modules/local_kind_cluster/main.tf +++ b/enos/modules/local_kind_cluster/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/read_license/main.tf b/enos/modules/read_license/main.tf index a1358b3e293d9..1b645272abe7c 100644 --- a/enos/modules/read_license/main.tf +++ b/enos/modules/read_license/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "file_name" {} output "license" { diff --git a/enos/modules/shutdown_multiple_nodes/main.tf b/enos/modules/shutdown_multiple_nodes/main.tf index 86045db0bc255..df0d1fb876001 100644 --- a/enos/modules/shutdown_multiple_nodes/main.tf +++ b/enos/modules/shutdown_multiple_nodes/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/shutdown_node/main.tf b/enos/modules/shutdown_node/main.tf index f27de68534e84..0ab4617e16300 100644 --- a/enos/modules/shutdown_node/main.tf +++ b/enos/modules/shutdown_node/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault-verify-replication/main.tf b/enos/modules/vault-verify-replication/main.tf new file mode 100644 index 0000000000000..57a97f9ddd1b7 --- /dev/null +++ b/enos/modules/vault-verify-replication/main.tf @@ -0,0 +1,31 @@ + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "smoke-verify-replication" { + for_each = local.instances + + content = templatefile("${path.module}/templates/smoke-verify-replication.sh", { + vault_edition = var.vault_edition + }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault-verify-replication/templates/smoke-verify-replication.sh b/enos/modules/vault-verify-replication/templates/smoke-verify-replication.sh new file mode 100644 index 0000000000000..d7bc72f23c246 --- /dev/null +++ b/enos/modules/vault-verify-replication/templates/smoke-verify-replication.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# The Vault replication smoke test, documented in +# https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 + +set -e + +edition=${vault_edition} + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +# Replication status endpoint should have data.mode disabled for OSS release +status=$(curl -s http://localhost:8200/v1/sys/replication/status) +if [ "$edition" == "oss" ]; then + if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then + fail "replication data mode is not disabled for OSS release!" + fi +else + if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then + fail "DR replication should be available for an ENT release!" 
+ fi + if [ "$(jq -r '.data.performance' <<< "$status")" == "" ]; then + fail "Performance replication should be available for an ENT release!" + fi +fi diff --git a/enos/modules/vault-verify-replication/variables.tf b/enos/modules/vault-verify-replication/variables.tf new file mode 100644 index 0000000000000..b335ee45efcef --- /dev/null +++ b/enos/modules/vault-verify-replication/variables.tf @@ -0,0 +1,24 @@ + +variable "vault_edition" { + type = string + description = "The vault product edition" + default = null +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} diff --git a/enos/modules/vault-verify-ui/main.tf b/enos/modules/vault-verify-ui/main.tf new file mode 100644 index 0000000000000..5703326d1a510 --- /dev/null +++ b/enos/modules/vault-verify-ui/main.tf @@ -0,0 +1,31 @@ + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "smoke-verify-ui" { + for_each = local.instances + + content = templatefile("${path.module}/templates/smoke-verify-ui.sh", { + vault_install_dir = var.vault_install_dir, + }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault-verify-ui/templates/smoke-verify-ui.sh b/enos/modules/vault-verify-ui/templates/smoke-verify-ui.sh new file mode 100644 index 0000000000000..bcd7e1cc3055e --- /dev/null +++ 
b/enos/modules/vault-verify-ui/templates/smoke-verify-ui.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} +if [ "$(curl -s -o /dev/null -w "%%{redirect_url}" http://localhost:8200/)" != "http://localhost:8200/ui/" ]; then + fail "Port 8200 not redirecting to UI" +fi +if curl -s http://localhost:8200/ui/ | grep -q 'Vault UI is not available'; then + fail "Vault UI is not available" +fi diff --git a/enos/modules/vault-verify-ui/variables.tf b/enos/modules/vault-verify-ui/variables.tf new file mode 100644 index 0000000000000..7eaf5d1bf7f4a --- /dev/null +++ b/enos/modules/vault-verify-ui/variables.tf @@ -0,0 +1,19 @@ + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" + default = null +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} diff --git a/enos/modules/vault-verify-write-data/main.tf b/enos/modules/vault-verify-write-data/main.tf new file mode 100644 index 0000000000000..966e833f740b0 --- /dev/null +++ b/enos/modules/vault-verify-write-data/main.tf @@ -0,0 +1,50 @@ + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "smoke-enable-secrets-kv" { + + content = templatefile("${path.module}/templates/smoke-enable-secrets-kv.sh", { + vault_install_dir = var.vault_install_dir, + vault_token = var.vault_root_token, + }) + + transport = { + ssh = { + host = local.instances[0].public_ip + } + } +} + +# Verify that we can enable 
the k/v secrets engine and write data to it. +resource "enos_remote_exec" "smoke-write-test-data" { + depends_on = [enos_remote_exec.smoke-enable-secrets-kv] + for_each = local.instances + + content = templatefile("${path.module}/templates/smoke-write-test-data.sh", { + test_key = "smoke${each.key}" + test_value = "fire" + vault_install_dir = var.vault_install_dir, + vault_token = var.vault_root_token, + }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault-verify-write-data/templates/smoke-enable-secrets-kv.sh b/enos/modules/vault-verify-write-data/templates/smoke-enable-secrets-kv.sh new file mode 100644 index 0000000000000..fb28fd9a8240e --- /dev/null +++ b/enos/modules/vault-verify-write-data/templates/smoke-enable-secrets-kv.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +set -e + +function retry { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +function fail { + echo "$1" 1>&2 + exit 1 +} + +binpath=${vault_install_dir}/vault + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +export VAULT_TOKEN='${vault_token}' + +retry 5 "$binpath" status > /dev/null 2>&1 +retry 5 $binpath secrets enable -path="secret" kv diff --git a/enos/modules/vault-verify-write-data/templates/smoke-write-test-data.sh b/enos/modules/vault-verify-write-data/templates/smoke-write-test-data.sh new file mode 100644 index 0000000000000..d514881425a1e --- /dev/null +++ b/enos/modules/vault-verify-write-data/templates/smoke-write-test-data.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +set -e + +function retry { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? 
+ wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +function fail { + echo "$1" 1>&2 + exit 1 +} + +binpath=${vault_install_dir}/vault +testkey=${test_key} +testvalue=${test_value} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +export VAULT_TOKEN='${vault_token}' + +retry 5 "$binpath" status > /dev/null 2>&1 +retry 5 $binpath kv put secret/test $testkey=$testvalue diff --git a/enos/modules/vault-verify-write-data/variables.tf b/enos/modules/vault-verify-write-data/variables.tf new file mode 100644 index 0000000000000..ac00f1091fcba --- /dev/null +++ b/enos/modules/vault-verify-write-data/variables.tf @@ -0,0 +1,25 @@ + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" + default = null +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" + default = null +} diff --git a/enos/modules/vault_agent/main.tf b/enos/modules/vault_agent/main.tf index cb112020b3903..001a53278a27a 100644 --- a/enos/modules/vault_agent/main.tf +++ b/enos/modules/vault_agent/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { aws = { diff --git a/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh b/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh index 42a0976416423..5444508de3036 100644 --- a/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh +++ b/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_artifactory_artifact/locals.tf b/enos/modules/vault_artifactory_artifact/locals.tf index 708813faa4541..e022a62c3966e 100644 --- a/enos/modules/vault_artifactory_artifact/locals.tf +++ b/enos/modules/vault_artifactory_artifact/locals.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - locals { // file name extensions for the install packages of vault for the various architectures, distributions and editions diff --git a/enos/modules/vault_artifactory_artifact/main.tf b/enos/modules/vault_artifactory_artifact/main.tf index 0f0df3865c7ad..ebc517030ba1e 100644 --- a/enos/modules/vault_artifactory_artifact/main.tf +++ b/enos/modules/vault_artifactory_artifact/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_artifactory_artifact/outputs.tf b/enos/modules/vault_artifactory_artifact/outputs.tf index c100c45ddd979..827b2e7734158 100644 --- a/enos/modules/vault_artifactory_artifact/outputs.tf +++ b/enos/modules/vault_artifactory_artifact/outputs.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - output "url" { value = data.enos_artifactory_item.vault.results[0].url diff --git a/enos/modules/vault_artifactory_artifact/variables.tf b/enos/modules/vault_artifactory_artifact/variables.tf index 7b641ce98f957..778354e7deeab 100644 --- a/enos/modules/vault_artifactory_artifact/variables.tf +++ b/enos/modules/vault_artifactory_artifact/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "artifactory_username" { type = string diff --git a/enos/modules/vault_get_cluster_ips/main.tf b/enos/modules/vault_get_cluster_ips/main.tf index 25a8902108e13..a288798e60d7d 100644 --- a/enos/modules/vault_get_cluster_ips/main.tf +++ b/enos/modules/vault_get_cluster_ips/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh index 98b2d21fdda6c..360e9d79d6a84 100644 --- a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh +++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_raft_remove_peer/main.tf b/enos/modules/vault_raft_remove_peer/main.tf index a0da72249e1fd..f1d33d9c0337b 100644 --- a/enos/modules/vault_raft_remove_peer/main.tf +++ b/enos/modules/vault_raft_remove_peer/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh b/enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh index ab49f76ba8204..6d967ee30d5e6 100644 --- a/enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh +++ b/enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_setup_perf_primary/main.tf b/enos/modules/vault_setup_perf_primary/main.tf index 268a5323ccf1d..85f5cc910c764 100644 --- a/enos/modules/vault_setup_perf_primary/main.tf +++ b/enos/modules/vault_setup_perf_primary/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh index d15699c4b9667..679729f3f5a3d 100644 --- a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh +++ b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_setup_perf_secondary/main.tf b/enos/modules/vault_setup_perf_secondary/main.tf index 3be3ca8897f7f..1fac54b94d2c6 100644 --- a/enos/modules/vault_setup_perf_secondary/main.tf +++ b/enos/modules/vault_setup_perf_secondary/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_test_ui/main.tf b/enos/modules/vault_test_ui/main.tf index 0e051bdf48cff..2af426232e46b 100644 --- a/enos/modules/vault_test_ui/main.tf +++ b/enos/modules/vault_test_ui/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_test_ui/outputs.tf b/enos/modules/vault_test_ui/outputs.tf index 887d030a7899d..abe4924cebe7d 100644 --- a/enos/modules/vault_test_ui/outputs.tf +++ b/enos/modules/vault_test_ui/outputs.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - output "ui_test_stderr" { value = var.ui_run_tests ? enos_local_exec.test_ui[0].stderr : "No std out tests where not run" } diff --git a/enos/modules/vault_test_ui/scripts/test_ui.sh b/enos/modules/vault_test_ui/scripts/test_ui.sh index e7cf7e9564ed9..f84cb929f878d 100755 --- a/enos/modules/vault_test_ui/scripts/test_ui.sh +++ b/enos/modules/vault_test_ui/scripts/test_ui.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -eux -o pipefail diff --git a/enos/modules/vault_test_ui/variables.tf b/enos/modules/vault_test_ui/variables.tf index c2db5c57b9bba..807cf01aea7fa 100644 --- a/enos/modules/vault_test_ui/variables.tf +++ b/enos/modules/vault_test_ui/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "vault_addr" { description = "The host address for the vault instance to test" type = string diff --git a/enos/modules/vault_unseal_nodes/main.tf b/enos/modules/vault_unseal_nodes/main.tf index b8b86b3ba9900..2bd1c6c0fbf38 100644 --- a/enos/modules/vault_unseal_nodes/main.tf +++ b/enos/modules/vault_unseal_nodes/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - # This module unseals the replication secondary follower nodes terraform { required_providers { diff --git a/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh index b3f77de5041cf..6fe00a93de295 100755 --- a/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh +++ b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - binpath=${VAULT_INSTALL_DIR}/vault diff --git a/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh index af935578781ac..d0ebb1f067caf 100644 --- a/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh +++ b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - binpath=${VAULT_INSTALL_DIR}/vault diff --git a/enos/modules/vault_upgrade/main.tf b/enos/modules/vault_upgrade/main.tf index 5502212d51518..07e65bf197f96 100644 --- a/enos/modules/vault_upgrade/main.tf +++ b/enos/modules/vault_upgrade/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { aws = { diff --git a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh b/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh index 127be64499cf3..e424aa44406ce 100644 --- a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh +++ b/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh @@ -1,7 +1,4 @@ #!/bin/bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh b/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh index d64a6c16ed366..5c36dae336f5c 100644 --- a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh +++ b/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh @@ -1,7 +1,4 @@ #!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_upgrade/templates/restart-vault.sh b/enos/modules/vault_upgrade/templates/restart-vault.sh index fc6b007a35097..aa68536430560 100644 --- a/enos/modules/vault_upgrade/templates/restart-vault.sh +++ b/enos/modules/vault_upgrade/templates/restart-vault.sh @@ -1,7 +1,4 @@ #!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -eux diff --git a/enos/modules/vault_verify_agent_output/main.tf b/enos/modules/vault_verify_agent_output/main.tf index 850ea5366c1e6..6643c8b626620 100644 --- a/enos/modules/vault_verify_agent_output/main.tf +++ b/enos/modules/vault_verify_agent_output/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh index cd25a01c8d021..3c434ba972762 100644 --- a/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh +++ b/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_verify_autopilot/main.tf b/enos/modules/vault_verify_autopilot/main.tf index b1d050af29595..ca03ea6f6f29c 100644 --- a/enos/modules/vault_verify_autopilot/main.tf +++ b/enos/modules/vault_verify_autopilot/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh index 129288a3cd212..1dd5d90147100 100755 --- a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh +++ b/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh @@ -1,7 +1,4 @@ #!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - token="${vault_token}" autopilot_version="${vault_autopilot_upgrade_version}" diff --git a/enos/modules/vault_verify_performance_replication/main.tf b/enos/modules/vault_verify_performance_replication/main.tf index 6604f8c126eaf..a44eec5b0dc6b 100644 --- a/enos/modules/vault_verify_performance_replication/main.tf +++ b/enos/modules/vault_verify_performance_replication/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_verify_raft_auto_join_voter/main.tf b/enos/modules/vault_verify_raft_auto_join_voter/main.tf index 44df4496ab9e5..ded9c3cc7007f 100644 --- a/enos/modules/vault_verify_raft_auto_join_voter/main.tf +++ b/enos/modules/vault_verify_raft_auto_join_voter/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh index 3187ac69fbf00..e1172d7158f61 100644 --- a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh +++ b/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_verify_read_data/main.tf b/enos/modules/vault_verify_read_data/main.tf index 8a4d7ea7ace5f..a104e1d677a06 100644 --- a/enos/modules/vault_verify_read_data/main.tf +++ b/enos/modules/vault_verify_read_data/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_verify_read_data/scripts/verify-data.sh b/enos/modules/vault_verify_read_data/scripts/verify-data.sh index 5c095c58caf8e..d150d8f7efc41 100644 --- a/enos/modules/vault_verify_read_data/scripts/verify-data.sh +++ b/enos/modules/vault_verify_read_data/scripts/verify-data.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_verify_replication/main.tf b/enos/modules/vault_verify_replication/main.tf index fbb360a38da4c..57a97f9ddd1b7 100644 --- a/enos/modules/vault_verify_replication/main.tf +++ b/enos/modules/vault_verify_replication/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { diff --git a/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh b/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh index 1ef6207a37eb8..d7bc72f23c246 100644 --- a/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh +++ b/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # The Vault replication smoke test, documented in # https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 diff --git a/enos/modules/vault_verify_replication/variables.tf b/enos/modules/vault_verify_replication/variables.tf index 26ac75c912916..b335ee45efcef 100644 --- a/enos/modules/vault_verify_replication/variables.tf +++ b/enos/modules/vault_verify_replication/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "vault_edition" { type = string diff --git a/enos/modules/vault_verify_ui/main.tf b/enos/modules/vault_verify_ui/main.tf index 32986072cba77..5703326d1a510 100644 --- a/enos/modules/vault_verify_ui/main.tf +++ b/enos/modules/vault_verify_ui/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { diff --git a/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh index 2ec23a107332b..bcd7e1cc3055e 100644 --- a/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh +++ b/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_verify_ui/variables.tf b/enos/modules/vault_verify_ui/variables.tf index 344f0d8077d7b..7eaf5d1bf7f4a 100644 --- a/enos/modules/vault_verify_ui/variables.tf +++ b/enos/modules/vault_verify_ui/variables.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - variable "vault_install_dir" { type = string diff --git a/enos/modules/vault_verify_undo_logs/main.tf b/enos/modules/vault_verify_undo_logs/main.tf index 717d90735fd07..c856ca4c92c59 100644 --- a/enos/modules/vault_verify_undo_logs/main.tf +++ b/enos/modules/vault_verify_undo_logs/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh index ec308dd8bbaf0..5761ea1d74741 100644 --- a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh +++ b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh @@ -1,7 +1,4 @@ #!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - function fail() { echo "$1" 1>&2 diff --git a/enos/modules/vault_verify_unsealed/main.tf b/enos/modules/vault_verify_unsealed/main.tf index 45d15418a6327..0b615295cea69 100644 --- a/enos/modules/vault_verify_unsealed/main.tf +++ b/enos/modules/vault_verify_unsealed/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh b/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh index c69c253ba4c1a..aefc75ec117a0 100644 --- a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh +++ b/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_verify_version/main.tf b/enos/modules/vault_verify_version/main.tf index 88b4e7a00d42d..9e80f456c3b5a 100644 --- a/enos/modules/vault_verify_version/main.tf +++ b/enos/modules/vault_verify_version/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh b/enos/modules/vault_verify_version/templates/verify-cluster-version.sh index ba5df7488580f..3fd2102351714 100644 --- a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh +++ b/enos/modules/vault_verify_version/templates/verify-cluster-version.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # Verify the Vault "version" includes the correct base version, build date, # revision SHA, and edition metadata. diff --git a/enos/modules/vault_verify_write_data/main.tf b/enos/modules/vault_verify_write_data/main.tf index 2369e51dd4046..afb2cf2bc28cc 100644 --- a/enos/modules/vault_verify_write_data/main.tf +++ b/enos/modules/vault_verify_write_data/main.tf @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh index 2d3e81c2161a3..0e32060810bef 100644 --- a/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh +++ b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh index 98b6392c580cb..62b357cc897b9 100644 --- a/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh +++ b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/go.mod b/go.mod index 04b5cdbbaede2..fe34e6dc9568d 100644 --- a/go.mod +++ b/go.mod @@ -1,16 +1,6 @@ module github.com/hashicorp/vault -// The go version directive value isn't consulted when building our production binaries, -// and the vault module isn't intended to be imported into other projects. As such the -// impact of this setting is usually rather limited. Note however that in some cases the -// Go project introduces new semantics for handling of go.mod depending on the value. -// -// The general policy for updating it is: when the Go major version used on the branch is -// updated. If we choose not to do so at some point (e.g. because we don't want some new -// semantic related to Go module handling), this comment should be updated to explain that. -// -// Whenever this value gets updated, sdk/go.mod should be updated to the same value. 
-go 1.20 +go 1.19 replace github.com/hashicorp/vault/api => ./api @@ -22,26 +12,26 @@ replace github.com/hashicorp/vault/api/auth/userpass => ./api/auth/userpass replace github.com/hashicorp/vault/sdk => ./sdk +replace go.etcd.io/etcd/client/pkg/v3 v3.5.0 => go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672 + require ( - cloud.google.com/go/monitoring v1.13.0 - cloud.google.com/go/spanner v1.45.0 - cloud.google.com/go/storage v1.28.1 - github.com/Azure/azure-storage-blob-go v0.15.0 - github.com/Azure/go-autorest/autorest v0.11.29 - github.com/Azure/go-autorest/autorest/adal v0.9.22 + cloud.google.com/go/monitoring v1.2.0 + cloud.google.com/go/spanner v1.5.1 + cloud.google.com/go/storage v1.10.0 + github.com/Azure/azure-storage-blob-go v0.14.0 + github.com/Azure/go-autorest/autorest v0.11.24 + github.com/Azure/go-autorest/autorest/adal v0.9.18 github.com/NYTimes/gziphandler v1.1.1 - github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895 github.com/SAP/go-hdb v0.14.1 github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a github.com/aerospike/aerospike-client-go/v5 v5.6.0 - github.com/aliyun/alibaba-cloud-sdk-go v1.62.301 + github.com/aliyun/alibaba-cloud-sdk-go v1.61.1499 github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 - github.com/armon/go-metrics v0.4.1 + github.com/armon/go-metrics v0.4.0 github.com/armon/go-radix v1.0.0 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef - github.com/aws/aws-sdk-go v1.44.268 - github.com/aws/aws-sdk-go-v2/config v1.18.19 + github.com/aws/aws-sdk-go v1.44.95 github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a github.com/cenkalti/backoff/v3 v3.2.2 github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 @@ -49,57 +39,53 @@ require ( github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c github.com/coreos/go-systemd 
v0.0.0-20191104093116-d3cd4ed1dbcf github.com/denisenkom/go-mssqldb v0.12.2 + github.com/docker/docker v20.10.18+incompatible + github.com/docker/go-connections v0.4.0 github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 github.com/dustin/go-humanize v1.0.0 - github.com/fatih/color v1.15.0 + github.com/fatih/color v1.13.0 github.com/fatih/structs v1.1.0 - github.com/favadi/protoc-go-inject-tag v1.4.0 + github.com/favadi/protoc-go-inject-tag v1.3.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/go-errors/errors v1.4.2 - github.com/go-jose/go-jose/v3 v3.0.0 - github.com/go-ldap/ldap/v3 v3.4.4 + github.com/go-errors/errors v1.4.1 + github.com/go-ldap/ldap/v3 v3.4.1 github.com/go-sql-driver/mysql v1.6.0 - github.com/go-test/deep v1.1.0 - github.com/go-zookeeper/zk v1.0.3 + github.com/go-test/deep v1.0.8 github.com/gocql/gocql v1.0.0 - github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/golang/protobuf v1.5.3 - github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 - github.com/google/go-cmp v0.5.9 + github.com/golang-jwt/jwt/v4 v4.3.0 + github.com/golang/protobuf v1.5.2 + github.com/google/go-cmp v0.5.8 github.com/google/go-github v17.0.0+incompatible github.com/google/go-metrics-stackdriver v0.2.0 - github.com/google/tink/go v1.7.0 - github.com/hashicorp/cap v0.3.1 - github.com/hashicorp/consul-template v0.32.0 - github.com/hashicorp/consul/api v1.20.0 + github.com/google/tink/go v1.6.1 + github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 + github.com/hashicorp/consul-template v0.29.5 + github.com/hashicorp/consul/api v1.15.2 github.com/hashicorp/errwrap v1.1.0 - github.com/hashicorp/eventlogger v0.1.1 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 github.com/hashicorp/go-gcp-common v0.8.0 - github.com/hashicorp/go-hclog v1.5.0 - github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 - github.com/hashicorp/go-kms-wrapping/v2 v2.0.9 - 
github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 + github.com/hashicorp/go-hclog v1.3.1 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.5 + github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4 github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 - github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7 - github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8 - github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 - github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.1 + github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.1 + github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.1 + github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.0 + github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.1 github.com/hashicorp/go-memdb v1.3.3 github.com/hashicorp/go-msgpack v1.1.5 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-plugin v1.4.9 + github.com/hashicorp/go-plugin v1.4.5 github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a - github.com/hashicorp/go-retryablehttp v0.7.2 + github.com/hashicorp/go-retryablehttp v0.7.1 github.com/hashicorp/go-rootcerts v1.0.2 - github.com/hashicorp/go-secure-stdlib/awsutil v0.2.2 + github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1 github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2 - github.com/hashicorp/go-secure-stdlib/mlock v0.1.3 - github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0 + github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 github.com/hashicorp/go-secure-stdlib/password v0.1.1 github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 @@ -111,57 +97,54 @@ require ( 
github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/hcl v1.0.1-vault-5 - github.com/hashicorp/hcl/v2 v2.16.2 - github.com/hashicorp/hcp-link v0.1.0 - github.com/hashicorp/hcp-scada-provider v0.2.1 - github.com/hashicorp/hcp-sdk-go v0.23.0 - github.com/hashicorp/nomad/api v0.0.0-20230519153805-2275a83cbfdf + github.com/hashicorp/hcp-sdk-go v0.22.0 + github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 github.com/hashicorp/raft v1.3.10 - github.com/hashicorp/raft-autopilot v0.2.0 + github.com/hashicorp/raft-autopilot v0.1.6 github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c github.com/hashicorp/raft-snapshot v1.0.4 - github.com/hashicorp/vault-plugin-auth-alicloud v0.15.0 - github.com/hashicorp/vault-plugin-auth-azure v0.15.0 - github.com/hashicorp/vault-plugin-auth-centrify v0.15.1 - github.com/hashicorp/vault-plugin-auth-cf v0.15.0 - github.com/hashicorp/vault-plugin-auth-gcp v0.16.0 - github.com/hashicorp/vault-plugin-auth-jwt v0.16.0 - github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0 - github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0 - github.com/hashicorp/vault-plugin-auth-oci v0.14.0 - github.com/hashicorp/vault-plugin-database-couchbase v0.9.2 - github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.2 - github.com/hashicorp/vault-plugin-database-mongodbatlas v0.10.0 - github.com/hashicorp/vault-plugin-database-redis v0.2.1 - github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.1 - github.com/hashicorp/vault-plugin-database-snowflake v0.8.0 + github.com/hashicorp/vault-plugin-auth-alicloud v0.13.0 + github.com/hashicorp/vault-plugin-auth-azure v0.12.0 + github.com/hashicorp/vault-plugin-auth-centrify v0.13.0 + github.com/hashicorp/vault-plugin-auth-cf v0.13.0 + github.com/hashicorp/vault-plugin-auth-gcp v0.14.0 + github.com/hashicorp/vault-plugin-auth-jwt v0.14.2 + github.com/hashicorp/vault-plugin-auth-kerberos v0.8.0 + 
github.com/hashicorp/vault-plugin-auth-kubernetes v0.14.1 + github.com/hashicorp/vault-plugin-auth-oci v0.12.0 + github.com/hashicorp/vault-plugin-database-couchbase v0.8.0 + github.com/hashicorp/vault-plugin-database-elasticsearch v0.12.0 + github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0 + github.com/hashicorp/vault-plugin-database-redis v0.1.0 + github.com/hashicorp/vault-plugin-database-redis-elasticache v0.1.0 + github.com/hashicorp/vault-plugin-database-snowflake v0.6.1 github.com/hashicorp/vault-plugin-mock v0.16.1 - github.com/hashicorp/vault-plugin-secrets-ad v0.16.0 - github.com/hashicorp/vault-plugin-secrets-alicloud v0.15.0 - github.com/hashicorp/vault-plugin-secrets-azure v0.16.0 - github.com/hashicorp/vault-plugin-secrets-gcp v0.16.0 - github.com/hashicorp/vault-plugin-secrets-gcpkms v0.15.0 - github.com/hashicorp/vault-plugin-secrets-kubernetes v0.5.0 - github.com/hashicorp/vault-plugin-secrets-kv v0.15.0 - github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.10.0 - github.com/hashicorp/vault-plugin-secrets-openldap v0.11.0 - github.com/hashicorp/vault-plugin-secrets-terraform v0.7.1 - github.com/hashicorp/vault-testing-stepwise v0.1.3 - github.com/hashicorp/vault/api v1.9.2 + github.com/hashicorp/vault-plugin-secrets-ad v0.14.1 + github.com/hashicorp/vault-plugin-secrets-alicloud v0.13.0 + github.com/hashicorp/vault-plugin-secrets-azure v0.14.1 + github.com/hashicorp/vault-plugin-secrets-gcp v0.14.1 + github.com/hashicorp/vault-plugin-secrets-gcpkms v0.13.0 + github.com/hashicorp/vault-plugin-secrets-kubernetes v0.2.0 + github.com/hashicorp/vault-plugin-secrets-kv v0.13.3 + github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.8.0 + github.com/hashicorp/vault-plugin-secrets-openldap v0.9.1 + github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0 + github.com/hashicorp/vault-testing-stepwise v0.1.2 + github.com/hashicorp/vault/api v1.8.0 github.com/hashicorp/vault/api/auth/approle v0.1.0 
github.com/hashicorp/vault/api/auth/userpass v0.1.0 - github.com/hashicorp/vault/sdk v0.9.2-0.20230530190758-08ee474850e0 - github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 + github.com/hashicorp/vault/sdk v0.6.1-0.20230427140652-b4b396ffc14f github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab github.com/jackc/pgx/v4 v4.15.0 - github.com/jcmturner/gokrb5/v8 v8.4.4 + github.com/jcmturner/gokrb5/v8 v8.4.2 github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f github.com/jefferai/jsonx v1.0.0 github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f - github.com/kr/pretty v0.3.1 + github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f + github.com/kr/pretty v0.3.0 github.com/kr/text v0.2.0 - github.com/mattn/go-colorable v0.1.13 - github.com/mattn/go-isatty v0.0.19 + github.com/mattn/go-colorable v0.1.12 + github.com/mattn/go-isatty v0.0.14 github.com/mholt/archiver/v3 v3.5.1 github.com/michaelklishin/rabbit-hole/v2 v2.12.0 github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a @@ -176,214 +159,175 @@ require ( github.com/ncw/swift v1.0.47 github.com/oklog/run v1.1.0 github.com/okta/okta-sdk-golang/v2 v2.12.1 - github.com/oracle/oci-go-sdk v24.3.0+incompatible + github.com/oracle/oci-go-sdk v13.1.0+incompatible github.com/ory/dockertest v3.3.5+incompatible - github.com/ory/dockertest/v3 v3.10.0 + github.com/ory/dockertest/v3 v3.9.1 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pires/go-proxyproto v0.6.1 github.com/pkg/errors v0.9.1 github.com/posener/complete v1.2.3 github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d - github.com/prometheus/client_golang v1.14.0 - github.com/prometheus/common v0.37.0 + github.com/prometheus/client_golang v1.11.1 + github.com/prometheus/common v0.26.0 github.com/rboyer/safeio v0.2.1 github.com/ryanuber/columnize v2.1.0+incompatible github.com/ryanuber/go-glob v1.0.0 + github.com/samuel/go-zookeeper 
v0.0.0-20190923202752-2cc03de413da github.com/sasha-s/go-deadlock v0.2.0 github.com/sethvargo/go-limiter v0.7.1 github.com/shirou/gopsutil/v3 v3.22.6 - github.com/stretchr/testify v1.8.4 - go.etcd.io/bbolt v1.3.7 - go.etcd.io/etcd/client/pkg/v3 v3.5.7 - go.etcd.io/etcd/client/v2 v2.305.5 - go.etcd.io/etcd/client/v3 v3.5.7 - go.mongodb.org/atlas v0.28.0 - go.mongodb.org/mongo-driver v1.11.6 - go.opentelemetry.io/otel v1.14.0 - go.opentelemetry.io/otel/sdk v1.14.0 - go.opentelemetry.io/otel/trace v1.14.0 - go.uber.org/atomic v1.11.0 - go.uber.org/goleak v1.2.1 - golang.org/x/crypto v0.9.0 - golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 - golang.org/x/net v0.10.0 - golang.org/x/oauth2 v0.8.0 - golang.org/x/sync v0.2.0 - golang.org/x/sys v0.8.0 - golang.org/x/term v0.8.0 - golang.org/x/text v0.9.0 - golang.org/x/tools v0.7.0 - google.golang.org/api v0.124.0 - google.golang.org/grpc v1.55.0 + github.com/stretchr/testify v1.8.0 + go.etcd.io/bbolt v1.3.6 + go.etcd.io/etcd/client/pkg/v3 v3.5.0 + go.etcd.io/etcd/client/v2 v2.305.0 + go.etcd.io/etcd/client/v3 v3.5.0 + go.mongodb.org/atlas v0.15.0 + go.mongodb.org/mongo-driver v1.7.3 + go.opentelemetry.io/otel v0.20.0 + go.opentelemetry.io/otel/sdk v0.20.0 + go.opentelemetry.io/otel/trace v0.20.0 + go.uber.org/atomic v1.9.0 + go.uber.org/goleak v1.1.12 + golang.org/x/crypto v0.8.0 + golang.org/x/net v0.9.0 + golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 + golang.org/x/sys v0.7.0 + golang.org/x/term v0.7.0 + golang.org/x/tools v0.6.0 + google.golang.org/api v0.83.0 + google.golang.org/grpc v1.47.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 - google.golang.org/protobuf v1.30.0 + google.golang.org/protobuf v1.28.1 + gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/ory-am/dockertest.v3 v3.3.4 + gopkg.in/square/go-jose.v2 v2.6.0 gotest.tools/gotestsum v1.9.0 - honnef.co/go/tools v0.4.3 - k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 
layeh.com/radius v0.0.0-20190322222518-890bc1058917 - mvdan.cc/gofumpt v0.3.1 - nhooyr.io/websocket v1.8.7 + mvdan.cc/gofumpt v0.1.1 ) require ( - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.19.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.0.1 // indirect - cloud.google.com/go/kms v1.10.2 // indirect + cloud.google.com/go v0.100.2 // indirect + cloud.google.com/go/compute v1.6.1 // indirect + cloud.google.com/go/iam v0.3.0 // indirect + cloud.google.com/go/kms v1.4.0 // indirect code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect - github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect - github.com/99designs/keyring v1.2.2 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go v67.2.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 // indirect - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/azure-sdk-for-go v62.0.0+incompatible // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // 
indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect - github.com/BurntSushi/toml v1.2.1 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect + github.com/BurntSushi/toml v1.2.0 // indirect github.com/DataDog/datadog-go v3.2.0+incompatible // indirect github.com/Jeffail/gabs v1.1.1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/Microsoft/hcsshim v0.9.0 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/agext/levenshtein v1.2.1 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/andybalholm/brotli v1.0.4 // indirect - github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 // indirect - github.com/aws/smithy-go v1.13.5 // indirect - github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64 // indirect + github.com/aws/aws-sdk-go-v2 v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.3.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0 // indirect + github.com/aws/smithy-go v1.7.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/boombuler/barcode v1.0.1 // indirect + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect - github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 // indirect - 
github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect github.com/circonus-labs/circonusllhist v0.1.3 // indirect - github.com/cloudflare/circl v1.1.0 // indirect github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 // indirect - github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect - github.com/containerd/containerd v1.7.0 // indirect + github.com/containerd/cgroups v1.0.3 // indirect + github.com/containerd/containerd v1.5.13 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/coreos/go-oidc v2.2.1+incompatible // indirect - github.com/coreos/go-oidc/v3 v3.5.0 // indirect + github.com/coreos/go-oidc/v3 v3.1.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/couchbase/gocb/v2 v2.6.3 // indirect - github.com/couchbase/gocbcore/v10 v10.2.3 // indirect - github.com/danieljoos/wincred v1.1.2 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/couchbase/gocb/v2 v2.3.3 // indirect + github.com/couchbase/gocbcore/v10 v10.0.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/digitalocean/godo v1.7.5 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/dnephin/pflag v1.0.7 // indirect - github.com/docker/cli v20.10.20+incompatible // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker v23.0.4+incompatible // indirect - github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/cli v20.10.18+incompatible // indirect + 
github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect - github.com/dvsekhvalnov/jose2go v1.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/envoyproxy/go-control-plane v0.11.0 // indirect - github.com/envoyproxy/protoc-gen-validate v0.10.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.2 // indirect - github.com/gammazero/deque v0.2.1 // indirect - github.com/gammazero/workerpool v1.1.3 // indirect - github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/gabriel-vasile/mimetype v1.3.1 // indirect + github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 // indirect + github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.1 // indirect github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/logr v1.2.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/analysis v0.20.0 // indirect - github.com/go-openapi/errors v0.20.1 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/errors v0.19.9 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect github.com/go-openapi/loads v0.20.2 // indirect github.com/go-openapi/runtime v0.19.24 // indirect github.com/go-openapi/spec v0.20.3 // indirect github.com/go-openapi/strfmt v0.20.0 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + 
github.com/go-openapi/swag v0.19.14 // indirect github.com/go-openapi/validate v0.20.2 // indirect github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect - github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect - github.com/gofrs/uuid v4.3.0+incompatible // indirect + github.com/go-stack/stack v1.8.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/flatbuffers v23.1.21+incompatible // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/flatbuffers v2.0.0+incompatible // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.4 // indirect + github.com/google/gofuzz v1.1.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.9.1 // indirect + github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect github.com/gophercloud/gophercloud v0.1.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-msgpack/v2 v2.0.0 // indirect + github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 // indirect github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 // indirect - github.com/hashicorp/go-slug v0.11.1 // 
indirect - github.com/hashicorp/go-tfe v1.25.1 // indirect + github.com/hashicorp/go-slug v0.7.0 // indirect + github.com/hashicorp/go-tfe v0.20.0 // indirect github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/mdns v1.0.4 // indirect - github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 // indirect - github.com/hashicorp/serf v0.10.1 // indirect - github.com/hashicorp/vault/api/auth/kubernetes v0.4.0 // indirect + github.com/hashicorp/serf v0.9.7 // indirect + github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/huandu/xstrings v1.4.0 // indirect - github.com/imdario/mergo v0.3.15 // indirect + github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect + github.com/huandu/xstrings v1.3.2 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.11.0 // indirect github.com/jackc/pgio v1.0.0 // indirect @@ -394,7 +338,7 @@ require ( github.com/jackc/pgx v3.3.0+incompatible // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect - github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gofork v1.0.0 // indirect github.com/jcmturner/goidentity/v6 v6.0.1 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 // indirect @@ -402,67 +346,59 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - github.com/klauspost/compress v1.16.5 // indirect + github.com/klauspost/compress v1.13.6 // indirect github.com/klauspost/pgzip v1.2.5 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect 
github.com/lib/pq v1.10.6 // indirect github.com/linode/linodego v0.7.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/mediocregopher/radix/v4 v4.1.2 // indirect - github.com/miekg/dns v1.1.43 // indirect - github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mediocregopher/radix/v4 v4.1.1 // indirect + github.com/miekg/dns v1.1.41 // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect - github.com/mitchellh/pointerstructure v1.2.1 // indirect - github.com/moby/patternmatcher v0.5.0 // indirect - github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/mitchellh/pointerstructure v1.2.0 // indirect + github.com/moby/sys/mount v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.5.0 // indirect + github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mongodb-forks/digest v1.0.4 // indirect - github.com/montanaflynn/stats v0.7.0 // indirect - github.com/mtibben/percent v0.2.1 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mongodb-forks/digest v1.0.3 // indirect github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect github.com/nwaples/rardecode v1.1.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect - github.com/opencontainers/runc v1.1.6 // 
indirect - github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runc v1.1.4 // indirect + github.com/openlyinc/pointy v1.1.2 // indirect github.com/oracle/oci-go-sdk/v60 v60.0.0 // indirect github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect - github.com/pierrec/lz4/v4 v4.1.17 // indirect + github.com/pierrec/lz4/v4 v4.1.8 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect - github.com/shopspring/decimal v1.3.1 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/sirupsen/logrus v1.9.0 // indirect - github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect - github.com/snowflakedb/gosnowflake v1.6.18 // indirect + github.com/snowflakedb/gosnowflake v1.6.3 // indirect github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect - github.com/spf13/cast v1.5.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.4.0 // indirect github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 // indirect - 
github.com/tilinna/clock v1.1.0 // indirect + github.com/tilinna/clock v1.0.2 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c // indirect github.com/ulikunitz/xz v0.5.10 // indirect github.com/vmware/govmomi v0.18.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect - github.com/xdg-go/scram v1.1.1 // indirect - github.com/xdg-go/stringprep v1.0.3 // indirect + github.com/xdg-go/scram v1.0.2 // indirect + github.com/xdg-go/stringprep v1.0.2 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect @@ -470,30 +406,28 @@ require ( github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - github.com/zclconf/go-cty v1.12.1 // indirect - go.etcd.io/etcd/api/v3 v3.5.7 // indirect - go.opencensus.io v0.24.0 // indirect + go.etcd.io/etcd/api/v3 v3.5.0 // indirect + go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/otel/metric v0.20.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.1 // indirect - golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect + golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230525154841-bd750badd5c6 // indirect + google.golang.org/genproto 
v0.0.0-20220602131408-e326c6e8e9c8 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/resty.v1 v1.12.0 // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.27.2 // indirect - k8s.io/apimachinery v0.27.2 // indirect - k8s.io/client-go v0.27.2 // indirect - k8s.io/klog/v2 v2.90.1 // indirect - k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + k8s.io/api v0.22.2 // indirect + k8s.io/apimachinery v0.22.2 // indirect + k8s.io/client-go v0.22.2 // indirect + k8s.io/klog/v2 v2.60.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/go.sum b/go.sum index 1a065c57ff6da..474e1c61dd613 100644 --- a/go.sum +++ b/go.sum @@ -6,7 +6,6 @@ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSR cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -19,7 +18,6 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= 
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -32,567 +30,85 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod 
h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= -cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= -cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= -cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/appengine v1.6.0/go.mod 
h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl 
v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod 
h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= 
-cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod 
h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute v1.19.3 h1:DcTwsFgGev/wV5+q8o2fzgcHOaac+DKGC91ZlvpsQds= -cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= 
-cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= -cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= -cloud.google.com/go/dataproc 
v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod 
h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= -cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.7.0/go.mod 
h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= 
-cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v1.0.1 h1:lyeCAU6jpnVNrE9zGQkTl3WgNgK/X+uWwaw0kynZJMU= 
-cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/kms v1.4.0 h1:iElbfoE61VeLhnZcGOltqL8HIly8Nhbe5t6JlH9GXjo= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= -cloud.google.com/go/kms v1.10.2 h1:8UePKEypK3SQ6g+4mn/s/VgE5L7XOh+FwGGRUqvY3Hw= -cloud.google.com/go/kms v1.10.2/go.mod h1:9mX3Q6pdroWzL20pbK6RaOdBbXBEhMNgK4Pfz2bweb4= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.5.0/go.mod 
h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/memcache v1.9.0/go.mod 
h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= -cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= 
-cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= 
-cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/monitoring v1.2.0 h1:fEvQITrhVcPM6vuDQcgPMbU5kZFeQFwZmE7v6+S8BPo= +cloud.google.com/go/monitoring v1.2.0/go.mod h1:tE8I08OzjWmXLhCopnPaUDpfGOEJOonfWXGR9E9SsFo= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= 
-cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= 
-cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/scheduler v1.8.0/go.mod 
h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= 
-cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= -cloud.google.com/go/spanner v1.45.0 h1:7VdjZ8zj4sHbDw55atp5dfY6kn1j9sam9DRNpPQhqR4= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/spanner 
v1.5.1 h1:dWyj10TLlaxH2No6+tXsSCaq9oWgrRbXy1N3x/bhMGU= +cloud.google.com/go/spanner v1.5.1/go.mod h1:e1+8M6PF3ntV9Xr57X2Gf+UhylXXYF6gI4WRZ1kfu2A= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= 
-cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 
v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.6.0/go.mod 
h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= -github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= -github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= -github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20221206110420-d395f97c4830/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= -github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= 
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v67.2.0+incompatible h1:Uu/Ww6ernvPTrpq31kITVTIm/I5jlJ1wjtEH/bmSB2k= -github.com/Azure/azure-sdk-for-go v67.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v58.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v62.0.0+incompatible h1:8N2k27SYtc12qj5nTsuFMFJPZn5CGmgMWqTy4y9I7Jw= +github.com/Azure/azure-sdk-for-go v62.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod 
h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.1.0 h1:Q707jfTFqfunSnh73YkCBDXR3GQJKno3chPRxXw//ho= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.1.0/go.mod h1:vjoxsjVnPwhjHZw4PuuhpgYlcxWl5tyNedLHUl0ulFA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 h1:nBy98uKOIfun5z6wx6jwWLrULcM0+cjBalBFZlEZ7CA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= -github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= -github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= +github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= 
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.21/go.mod h1:Do/yuMSW/13ayUkcVREpsMHGG+MvV81uzSCFgYPj4tM= +github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= 
+github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= -github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= @@ -601,11 +117,11 @@ github.com/Azure/go-autorest/autorest/date 
v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= @@ -616,36 +132,27 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod 
h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= -github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= +github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= -github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils 
v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= @@ -654,11 +161,10 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.0/go.mod 
h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -666,15 +172,9 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= -github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim v0.10.0-rc.7 
h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8= -github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE= +github.com/Microsoft/hcsshim v0.9.0 h1:BBgYMxl5YZDZVIijz02AlDINpYZOzQqRNCl9CZM13vk= +github.com/Microsoft/hcsshim v0.9.0/go.mod h1:VBJWdC71NSWPlEo7lwde1aL21748J8B6Sdgno7NqEGE= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -683,13 +183,12 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895 h1:NsReiLpErIPzRrnogAXYwSoU7txA977LjDGrbkewJbg= -github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 
h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE= github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= @@ -700,23 +199,16 @@ github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KM github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/aerospike/aerospike-client-go/v5 v5.6.0 h1:tRxcUq0HY8fFPQEzF3EgrknF+w1xFO0YDfUb9Nm8yRI= github.com/aerospike/aerospike-client-go/v5 v5.6.0/go.mod h1:rJ/KpmClE7kiBPfvAPrGw9WuNOiz8v2uKbQaUyYPXtI= -github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= -github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= -github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c= -github.com/aliyun/alibaba-cloud-sdk-go v1.62.301 h1:8mgvCpqsv3mQAcqZ/baAaMGUBj5J6MKMhxLd+K8L27Q= -github.com/aliyun/alibaba-cloud-sdk-go v1.62.301/go.mod h1:Api2AkmMgGaSUAhmk76oaFObkoeCPc/bKAqcyplPODs= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.1499 h1:P2FUu1/xkj4abuHcqdRQO9ZAYc9hSWG5c5gifsU/Ogc= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.1499/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= @@ -724,14 +216,8 @@ github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/apache/arrow/go/arrow 
v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ= -github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= -github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= -github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64 h1:ZsPrlYPY/v1PR7pGrmYD/rq5BFiSPalH8i9eEkSfnnI= +github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -740,12 +226,12 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= +github.com/armon/go-metrics 
v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= @@ -754,74 +240,41 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGL github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.43.9/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.44.268 h1:WoK20tlAvsvQzTcE6TajoprbXmTbcud6MjhErL4P/38= -github.com/aws/aws-sdk-go v1.44.268/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= -github.com/aws/aws-sdk-go-v2 
v1.17.7 h1:CLSjnhJSTSogvqUGhIC6LqFKATMRexcxLZ0i/Nzk9Eg= -github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8/go.mod h1:JTnlBSot91steJeti4ryyu/tLd4Sk84O5W22L7O2EQU= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= -github.com/aws/aws-sdk-go-v2/config v1.17.7/go.mod h1:dN2gja/QXxFF15hQreyrqYhLBaQo1d9ZKe/v/uplQoI= -github.com/aws/aws-sdk-go-v2/config v1.18.19 h1:AqFK6zFNtq4i1EYu+eC7lcKHYnZagMn6SW171la0bGw= -github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY= -github.com/aws/aws-sdk-go-v2/credentials v1.12.20/go.mod h1:UKY5HyIux08bbNA7Blv4PcXQ8cTkGh7ghHMFklaviR4= -github.com/aws/aws-sdk-go-v2/credentials v1.13.18 h1:EQMdtHwz0ILTW1hoP+EwuWhwCG1hD6l3+RWFQABET4c= -github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 h1:gt57MN3liKiyGopcqgNzJb2+d9MJaKT/q1OksHNXVE4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.33/go.mod h1:84XgODVR8uRhmOnUkKGUZKqIMxmjmLOR8Uyp7G/TPwc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 h1:sJLYcS+eZn5EeNINGHSCRAwUJMFVqklwkH36Vbyai7M= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 h1:1mnRASEKnkqsntcxHaysxwgVoUUp5dkiB+l3llKnqyg= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 h1:p5luUImdIqywn6JpQsW3tq5GNOxKmOnEpybzPx+d1lk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14/go.mod h1:AyGgqiKv9ECM6IZeNQtdT8NnMvUb3/2wokeq2Fgryto= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 h1:DWYZIsyqagnWL00f8M/SOr9fN063OEQWn9LLTbdYXsk= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23/go.mod h1:uIiFgURZbACBEQJfqTZPb/jxO7R+9LeoHUFudtIdeQI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9/go.mod h1:a9j48l6yL5XINLHLcOKInjdvknN+vWqPBxqeIDw7ktw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18/go.mod h1:NS55eQ4YixUJPTC+INxi2/jCqe1y2Uw3rnh9wEOVJxY= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 h1:CeuSeq/8FnYpPtnuIeLQEEvDv9zUjneuYi8EghMBdwQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26/go.mod h1:2UqAAwMUXKeRkAHIlDJqvMVgOWkUi/AUXPk/YIe+Dg4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 
h1:5LHn8JQ0qvjD9L9JhMtylnkcw7j05GDZqM9Oin6hpr0= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17/go.mod h1:YqMdV+gEKCQ59NrB7rzrJdALeBIsYiVi8Inj3+KcqHI= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 h1:e2ooMhpYGhDnBfSvIyusvAwX7KexuZaHbQY2Dyei7VU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0/go.mod h1:bh2E0CXKZsQN+faiKVqC40vfNMAWheoULBCnEgO9K+8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11/go.mod h1:fmgDANqTUCxciViKl9hb/zD5LFbvPINFRgWhDbR+vZo= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 h1:B1G2pSPvbAtQjilPq+Y7jLIzCOwKzuVEl+aBBaNG0AQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 h1:5V7DWLBd7wTELVz5bPpwzYy/sikk0gsgZfj40X+l5OI= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 h1:B8cauxOH1W1v7rd8RdI/MWnoR4Ze0wIHWrb90qczxj4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 h1:bWNgNdRko2x6gqa0blfATqAZKZokPIeM1vfmQt2pnvM= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI= -github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= -github.com/aws/smithy-go v1.13.5/go.mod 
h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.95 h1:QwmA+PeR6v4pF0f/dPHVPWGAshAhb9TnGZBTM5uKuI8= +github.com/aws/aws-sdk-go v1.44.95/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2/config v1.6.0 h1:rtoCnNObhVm7me+v9sA2aY+NtHNZjjWWC3ifXVci+wE= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2 h1:Uud/fZzm0lqqhE8kvXYJFAJ3PGnagKoUcvHq1hXfBZw= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 h1:SGqDJun6tydgsSIFxv9+EYBJVqVUwg2QMJp6PbNq8C8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0 h1:Iqp2aHeRF3kaaNuDS82bHBzER285NM6lLPAgsxHCR2A= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 h1:xu45foJnwMwBqSkIMKyJP9kbyHi5hdhZ/WiJ7D2sHZ0= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2 h1:YcGVEqLQGHDa81776C3daai6ZkkRGf/8RAQ07hV0QcU= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 h1:Xv1rGYgsRRn0xw9JFNnfpBMZam54PrWpC4rJOJ9koA8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= 
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2 h1:ewIpdVz12MDinJJB/nu1uUiFIWFnvtd3iV7cEW7lR+M= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= +github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0 h1:cxZbzTYXgiQrZ6u2/RJZAkkgZssqYOdydvJPBgIHlsM= +github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2 h1:b+U3WrF9ON3f32FH19geqmiod4uKcMv/q+wosQjjyyM= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 h1:1Pls85C5CFjhE3aH+h85/hyAk89kQNlAWlEQtIkaFyc= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= +github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a h1:eqjiAL3qooftPm8b9C1GsSSRcmlw7iOva8vdBTmV2PY= github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -835,49 +288,32 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYE github.com/bitly/go-hostpool 
v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs= -github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bugsnag/bugsnag-go 
v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/bytecodealliance/wasmtime-go v0.36.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= 
github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 h1:jQ93fKqb/wRmK/KiHpa7Tk9rmHeKXhp4j+5Sg/tENiY= github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166/go.mod h1:c/gmvyN8lq6lYtHvrqqoXrg2xyN65N0mBmbikxFWXNE= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= @@ -892,43 +328,30 @@ github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= -github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod 
h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 h1:k8q2Nsz7kNaUlysVCnWIFLMUSqiKXaGLdIf9P0GsX2Y= github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306/go.mod h1:0FdHblxw7g3M2PPICOw9i8YZOHP9dZTHbJUtoxL7Z/E= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/container-orchestrated-devices/container-device-interface v0.5.4/go.mod h1:DjE95rfPiiSmG7uVXtg0z6MnPm/Lx4wxKCIts0ZE0vg= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod 
h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= @@ -936,7 +359,6 @@ github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k= github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= @@ -944,9 +366,8 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= -github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console 
v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -961,28 +382,24 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= -github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= -github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0= -github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0= -github.com/containerd/containerd v1.6.9/go.mod h1:XVicUvkxOrftE2Q1YWUXgZwkkAxwQYNOFzYWvfVfEfQ= -github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg= -github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc= 
+github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE= +github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= +github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= +github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= @@ -991,13 +408,8 @@ github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZ github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo 
v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34= -github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= @@ -1007,28 +419,19 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= -github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo= -github.com/containerd/imgcrypt v1.1.7/go.mod h1:FD8gqIcX5aTotCtOmjeCsi3A1dHmTZpnMISGKSczt4k= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod 
h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/nri v0.3.0/go.mod h1:Zw9q2lP16sdg0zYybemZ9yTDy8g7fPCIB3KXOGlggXI= github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/stargz-snapshotter/estargz v0.12.1/go.mod h1:12VUuCq3qPq4y8yUW+l5w3+oXV3cx2Po3KSe/SmPGqw= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= -github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4= -github.com/containerd/ttrpc v1.2.1/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/typeurl/v2 v2.1.0/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod 
h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= @@ -1037,32 +440,21 @@ github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNR github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= -github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= -github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= -github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= -github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= -github.com/containers/ocicrypt v1.1.6/go.mod 
h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw= -github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM= +github.com/coreos/go-oidc/v3 v3.1.0 h1:6avEvcdvTa1qYsOZ6I5PRkSYHzpTNWgKYmaJfaYbrRw= +github.com/coreos/go-oidc/v3 v3.1.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -1074,54 +466,38 @@ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pq github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod 
h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/couchbase/gocb/v2 v2.6.3 h1:5RsMo+RRfK0mVxHLAfpBz3/tHlgXZb1WBNItLk9Ab+c= -github.com/couchbase/gocb/v2 v2.6.3/go.mod h1:yF5F6BHTZ/ZowhEuZbySbXrlI4rHd1TIhm5azOaMbJU= -github.com/couchbase/gocbcore/v10 v10.2.3 h1:PEkRSNSkKjUBXx82Ucr094+anoiCG5GleOOQZOHo6D4= -github.com/couchbase/gocbcore/v10 v10.2.3/go.mod h1:lYQIIk+tzoMcwtwU5GzPbDdqEkwkH3isI2rkSpfL0oM= -github.com/couchbaselabs/gocaves/client v0.0.0-20230307083111-cc3960c624b1/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY= -github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259 h1:2TXy68EGEzIMHOx9UvczR5ApVecwCfQZ0LjkmwMI6g4= -github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY= +github.com/couchbase/gocb/v2 v2.3.3 h1:OItaIrFqXR1ba9J77E2YOU+CSF9G9FHYivV26Xgoi98= +github.com/couchbase/gocb/v2 v2.3.3/go.mod h1:h4b3UYDnGI89hMW9VypVjAr+EE0Ki4jjlXJrVdmSZhQ= +github.com/couchbase/gocbcore/v10 v10.0.4 h1:RJ+dSXxMUbrpfgYEEUhMYwPH1S5KvcQYve3D2aKHP28= +github.com/couchbase/gocbcore/v10 v10.0.4/go.mod h1:s6dwBFs4c3+cAzZbo1q0VW+QasudhHJuehE8b8U2YNg= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= -github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= -github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE= github.com/denisenkom/go-mssqldb v0.12.2 h1:1OcPn5GBIobjWNd+8yjfHNIaFX14B1pWI3F9HZy5KXw= github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk= github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -1130,31 +506,28 @@ github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nb 
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.20+incompatible h1:lWQbHSHUFs7KraSN2jOJK7zbMS2jNCHI4mt4xUFUVQ4= -github.com/docker/cli v20.10.20+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.18+incompatible h1:f/GQLsVpo10VvToRay2IraVA1wHz9KktZyjev3SIVDU= +github.com/docker/cli v20.10.18+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= 
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.20+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= -github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= +github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= -github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= 
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -1176,15 +549,9 @@ github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= -github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -1194,119 +561,75 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.0 h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y= -github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color 
v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/favadi/protoc-go-inject-tag v1.4.0 h1:K3KXxbgRw5WT4f43LbglARGz/8jVsDOS7uMjG4oNvXY= -github.com/favadi/protoc-go-inject-tag v1.4.0/go.mod h1:AZ+PK+QDKUOLlBRG0rYiKkUX5Hw7+7GTFzlU99GFSbQ= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/favadi/protoc-go-inject-tag v1.3.0 h1:JPrmsmc/uBShG85uY5xGZIa5WJ0IaNZn6LZhQR9tIQE= +github.com/favadi/protoc-go-inject-tag v1.3.0/go.mod h1:SSkUBgfqw2IJ2p7NPNKWk0Idwxt/qIt2LQgFPUgRGtc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/foxcpp/go-mockdns 
v0.0.0-20210729171921-fb145fc6f897/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= -github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= -github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= -github.com/gammazero/workerpool v1.1.3 
h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= -github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= +github.com/gabriel-vasile/mimetype v1.3.1 h1:qevA6c2MtE1RorlScnixeG0VA1H4xrXyhyX3oWBynNQ= +github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= +github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 h1:D2LrfOPgGHQprIxmsTpxtzhpmF66HoM6rXSmcqaX7h8= +github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= +github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 h1:VzbudKn/nvxYKOdzgkEBS6SSreRjAgoJ+ZeS4wPFkgc= +github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= 
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A= -github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-errors/errors v1.4.1 h1:IvVlgbzSsaUNudsw5dcXSzF3EWyXTi5XrAdngnuhRyg= +github.com/go-errors/errors v1.4.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-ini/ini v1.66.6/go.mod 
h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= -github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-ldap/ldap/v3 v3.1.7/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= -github.com/go-ldap/ldap/v3 v3.4.4 h1:qPjipEpt+qDa6SI/h1fzuGWoRUY+qqQ9sOZq67/PYUs= -github.com/go-ldap/ldap/v3 v3.4.4/go.mod h1:fe1MsuN5eJJ1FeLT/LEBVdWfNWKh459R7aXgXtJC+aI= github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 h1:sfz1YppV05y4sYaW7kXZtrocU/+vimnIWt4cxAYh7+o= github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3/go.mod h1:ZXFhGda43Z2TVbfGZefXyMJzsDHhCh0go3bZUcwTx7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= 
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= @@ -1328,26 +651,22 @@ github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9 h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.1 h1:j23mMDtRxMwIobkpId7sWh7Ddcx4ivaoqUbfXx5P+a8= 
-github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= 
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -1397,9 +716,8 @@ github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfT github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= @@ -1411,27 +729,16 @@ github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3h github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales 
v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= -github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= -github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod 
h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -1456,29 +763,16 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gocql/gocql v1.0.0 h1:UnbTERpP72VZ/viKE1Q1gPtmLvyTZTvuAstvSRydw/c= github.com/gocql/gocql v1.0.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= 
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc= -github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -1491,15 +785,13 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= 
+github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1534,29 +826,19 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy 
v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= -github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v23.1.21+incompatible h1:bUqzx/MXCDxuS0hRJL2EfjyZL3uQrPbMocUa8zGqsTA= -github.com/google/flatbuffers v23.1.21+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -1570,11 +852,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-metrics-stackdriver v0.2.0 h1:rbs2sxHAPn2OtUj9JdR/Gij1YKGl0BTVD0augB+HEjE= @@ -1583,16 +863,14 @@ github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:od github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= 
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -1602,69 +880,50 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof 
v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= -github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= +github.com/google/tink/go v1.6.1 h1:t7JHqO8Ath2w2ig5vjwQYJzhGEZymedQc90lQXUBa4I= +github.com/google/tink/go v1.6.1/go.mod h1:IGW53kTgag+st5yPhKKwJ6u2l+SSp5/v9XF7spovjlY= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod 
h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.9.1 h1:DpTpJqzZ3NvX9zqjhIuI1oVzYZMvboZe+3LoeEIJjHM= -github.com/googleapis/gax-go/v2 v2.9.1/go.mod h1:4FG3gMrVZlyMp5itSYKMU9z/lBE7+SbnUOvzH2HqbEY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod 
h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= 
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -1672,34 +931,27 @@ github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod 
h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/cap v0.3.1 h1:JwX2vg3KIl2+ka4VIPB0yWB9PoPvHL3ACmVrLJLCHDQ= -github.com/hashicorp/cap v0.3.1/go.mod h1:dHTmyMIVbzT981XxRoci5G//dfWmd/HhuNiCH6J5+IA= -github.com/hashicorp/consul-template v0.32.0 h1:VIfKjoJLkBYLgHdLH4mR7RstPc549qqHJiecqPwYTis= -github.com/hashicorp/consul-template v0.32.0/go.mod h1:r9mcCoHVkTeVln7aL4Ky+RfKupOtbEW70i8n9YuEe+w= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= -github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= +github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 h1:Vgv3jG0kicczshK+lOHWJ9OososZjnjSu1YslqofFYY= +github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220/go.mod h1:zb3VvIFA0lM2lbmO69NjowV9dJzJnZS89TaM9blXPJA= +github.com/hashicorp/consul-template v0.29.5 h1:tzEo93RqODAX2cgOe/ke8xcpdPdxg5rxl6d22wE3f6c= +github.com/hashicorp/consul-template v0.29.5/go.mod h1:SZGBPz/t0JaBwMOqM6q/mG66cBRA8IeDUjOwjO0Pa5M= +github.com/hashicorp/consul/api v1.15.2 h1:3Q/pDqvJ7udgt/60QOOW/p/PeKioQN+ncYzzCdN2av0= +github.com/hashicorp/consul/api v1.15.2/go.mod h1:v6nvB10borjOuIwNRZYPZiHKrTM/AyrGtd0WVVodKM8= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= +github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= +github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= 
github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/eventlogger v0.1.1 h1:zyCjxsy7KunFsMPZKU5PnwWEakSrp1zjj2vPFmrDaeo= -github.com/hashicorp/eventlogger v0.1.1/go.mod h1://CHt6/j+Q2lc0NlUB5af4aS2M0c0aVBg9/JfcpAyhM= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -1711,64 +963,59 @@ github.com/hashicorp/go-gcp-common v0.8.0 h1:/2vGAbCU1v+BZ3YHXTCzTvxqma9WOJHYtAD github.com/hashicorp/go-gcp-common v0.8.0/go.mod h1:Q7zYRy9ue9SuaEN2s9YLIQs4SoKHdoRmKRcImY3SLgs= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 
+github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= +github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.9 h1:JpCvi97NMA+saNqO8ovQcGoRbBq6P5ZZlJqvOsW5ick= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.9/go.mod h1:NtMaPhqSlfQ72XWDD2g80o8HI8RKkowIB8/WZHMyPY4= -github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 h1:ZV26VJYcITBom0QqYSUOIj4HOHCVPEFjLqjxyXV/AbA= -github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1/go.mod h1:b99cDSA+OzcyRoBZroSf174/ss/e6gUuS45wue9ZQfc= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.5 h1:rOFDv+3k05mnW0oaDLffhVUwg03Csn0mvfO98Wdd2bE= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.5/go.mod h1:sDQAfwJGv25uGPZA04x87ERglCG6avnRcBT9wYoMII8= +github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4 h1:ws2CPDuXMKwaBb2z/duBCdnB9pSxlN2nuDZWXcVj6RU= +github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4/go.mod h1:dDxt3GXi5QONVHYrJi2+EjsJLCUs59FktZQA8ZMnm+U= github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 h1:ydUCtmr8f9F+mHZ1iCsvzqFTXqNVpewX3s9zcYipMKI= github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1/go.mod h1:Sl/ffzV57UAyjtSg1h5Km0rN5+dtzZJm1CUztkoCW2c= 
-github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 h1:E3eEWpkofgPNrYyYznfS1+drq4/jFcqHQVNcL7WhUCo= -github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7/go.mod h1:j5vefRoguQUG7iM4reS/hKIZssU1lZRqNPM5Wow6UnM= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7 h1:X27JWuPW6Gmi2l7NMm0pvnp7z7hhtns2TeIOQU93mqI= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7/go.mod h1:i7Dt9mDsVUQG/I639jtdQerliaO2SvvPnpYPhZ8CGZ4= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8 h1:16I8OqBEuxZIowwn3jiLvhlx+z+ia4dJc9stvz0yUBU= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8/go.mod h1:6QUMo5BrXAtbzSuZilqmx0A4px2u6PeFK7vfp2WIzeM= -github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 h1:KeG3QGrbxbr2qAqCJdf3NR4ijAYwdcWLTmwSbR0yusM= -github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7/go.mod h1:rXxYzjjGw4HltEwxPp9zYSRIo6R+rBf1MSPk01bvodc= -github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 h1:G25tZFw/LrAzJWxvS0/BFI7V1xAP/UsAIsgBwiE0mwo= -github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7/go.mod h1:hxNA5oTfAvwPacWVg1axtF/lvTafwlAa6a6K4uzWHhw= +github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.1 h1:WxpTuafkDjdeeu0Xtk9y3m9YAJhfFMb8+y6eTnxvV8A= +github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.1/go.mod h1:3D5UB9fjot4oUTYGQ5gGmhLJKreyLZeI0XB+NxcLTKs= +github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.1 h1:6joKpqCFveaNMEwC3qna67usws6DjdxqfCuQEHSM0aM= +github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.1/go.mod h1:sDmsWR/W2LqwU217o32RzdHMb/FywGLF72PVIhpZ3hE= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.1 h1:+paf/3ompzaXe07BdxkV1vTnqvhwtmZPE4yQnMPTThI= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.1/go.mod h1:YRtkersQ2N3iHlPDG5B3xBQtBsNZ3bjmlCwnrl26jVE= +github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.0 h1:FnWV2E0NLj+yYdhToUQjU81ayCMgURiL2WbJ0V7u/XY= 
+github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.0/go.mod h1:17twrc0lM8IpfGqIv69WQvwgDiu3nRwWlk5YfCSQduY= +github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.1 h1:72zlIBTJd2pvYmINqotpvcI4ZXLxhRq2cVPTuqv0xqY= +github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.1/go.mod h1:JytRAxdJViV+unUUWedb7uzEy5pgu7OurbqX0eHEikE= github.com/hashicorp/go-memdb v1.3.3 h1:oGfEWrFuxtIUF3W2q/Jzt6G85TrMk9ey6XfYLvVe1Wo= github.com/hashicorp/go-memdb v1.3.3/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= -github.com/hashicorp/go-msgpack/v2 v2.0.0 h1:c1fiLq1LNghmLOry1ipGhvLDi+/zEoaEP2JrE1oFJ9s= -github.com/hashicorp/go-msgpack/v2 v2.0.0/go.mod h1:JIxYkkFJRDDRSoWQBSh7s9QAVThq+82iWmUpmE4jKak= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-plugin v1.4.9 h1:ESiK220/qE0aGxWdzKIvRH69iLiuN/PjoLTm69RoWtU= -github.com/hashicorp/go-plugin v1.4.9/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-plugin v1.4.5 
h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= +github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= -github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/awsutil v0.2.2 h1:kWg2vyKl7BRXrNxYziqDJ55n+vtOQ1QsGORjzoeB+uM= -github.com/hashicorp/go-secure-stdlib/awsutil v0.2.2/go.mod h1:oKHSQs4ivIfZ3fbXGQOop1XuDfdSb8RIsWTGaAanSfg= +github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA= +github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg= github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= 
github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= @@ -1779,11 +1026,8 @@ github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1/go.mod h1:6RoRTSMDK2H/r github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2 h1:NS6BHieb/pDfx3M9jDdaPpGyyVp+aD4A3DjX3dgRmzs= github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2/go.mod h1:rf5JPE13wi+NwjgsmGkbg4b2CgHq8v7Htn/F0nDe/hg= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.3 h1:kH3Rhiht36xhAfhuHyWJDgdXXEx9IIZhDGRk24CDhzg= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.3/go.mod h1:ov1Q0oEDjC3+A4BwsG2YdKltrmEw8sf9Pau4V9JQ4Vo= -github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0 h1:iJG9Q3iUme12yH+wzBMGYrw/Am4CfX3sDcA8m5OGfhQ= -github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0/go.mod h1:s28ohJ0kU6tersf0it/WsBCyZSdziPlP+G1FRA3ar28= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= @@ -1797,24 +1041,24 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= -github.com/hashicorp/go-slug v0.11.1 h1:c6lLdQnlhUWbS5I7hw8SvfymoFuy6EmiFDedy6ir994= 
-github.com/hashicorp/go-slug v0.11.1/go.mod h1:Ib+IWBYfEfJGI1ZyXMGNbu2BU+aa3Dzu41RKLH301v4= +github.com/hashicorp/go-slug v0.7.0 h1:8HIi6oreWPtnhpYd8lIGQBgp4rXzDWQTOhfILZm+nok= +github.com/hashicorp/go-slug v0.7.0/go.mod h1:Ib+IWBYfEfJGI1ZyXMGNbu2BU+aa3Dzu41RKLH301v4= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-tfe v1.25.1 h1:OxjDhY8Rj36n/uTSmhdFRLcnhXFfRTsopiovYSkJjak= -github.com/hashicorp/go-tfe v1.25.1/go.mod h1:1Y6nsdMuJ14lYdc1VMLl/erlthvMzUsJn+WYWaAdSc4= +github.com/hashicorp/go-tfe v0.20.0 h1:XUAhKoCX8ZUQfwBebC8hz7nkSSnqgNkaablIfxnZ0PQ= +github.com/hashicorp/go-tfe v0.20.0/go.mod h1:gyXLXbpBVxA2F/6opah8XBsOkZJxHYQmghl0OWi8keI= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -1822,114 +1066,99 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0= -github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= -github.com/hashicorp/hcp-link v0.1.0 h1:F6F1cpADc+o5EBI5CbJn5RX4qdFSLpuA4fN69eeE5lQ= -github.com/hashicorp/hcp-link v0.1.0/go.mod h1:BWVDuJDHrKJtWc5qI07bX5xlLjSgWq6kYLQUeG1g5dM= -github.com/hashicorp/hcp-scada-provider v0.2.1 h1:yr+Uxini7SWTZ2t49d3Xi+6+X/rbsSFx8gq6WVcC91c= -github.com/hashicorp/hcp-scada-provider v0.2.1/go.mod h1:Q0WpS2RyhBKOPD4X/8oW7AJe7jA2HXB09EwDzwRTao0= -github.com/hashicorp/hcp-sdk-go v0.23.0 h1:3WarkQSK0VzxJaH6psHIGQagag3ujL+NjWagZZHpiZM= -github.com/hashicorp/hcp-sdk-go v0.23.0/go.mod h1:/9UoDY2FYYA8lFaKBb2HmM/jKYZGANmf65q9QRc/cVw= +github.com/hashicorp/hcp-sdk-go v0.22.0 h1:LWkLOkJFYWSojBM3IkwvYK6nrwrL+p4Fw8zEaoCQG10= +github.com/hashicorp/hcp-sdk-go v0.22.0/go.mod h1:mM3nYdVHuv2X2tv88MGVKRf/o2k3zF8jUZSMkwICQ28= github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d h1:9ARUJJ1VVynB176G1HCwleORqCaXm/Vx0uUi0dL26I0= github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d/go.mod h1:Yog5+CPEM3c99L1CL2CFCYoSzgWm5vTU58idbRUaLik= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= 
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= -github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 h1:kBpVVl1sl3MaSrs97e0+pDQhSrqJv9gVbSUrPpVfl1w= -github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0/go.mod h1:6pdNz0vo0mF0GvhwDG56O3N18qBrAz/XRIcfINfTbwo= -github.com/hashicorp/nomad/api v0.0.0-20230519153805-2275a83cbfdf h1:cKXVf1UJqwdkGiTF3idqCOLApAql0310OSmJxeiaMWg= -github.com/hashicorp/nomad/api v0.0.0-20230519153805-2275a83cbfdf/go.mod h1:rb38DqjaaIfhJRiLeCAGgIt+wV7o78rB+liyFE3mVzE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= +github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 h1:fo8EbQ6tc9hYqxik9CAdFMqy48TW8hh2I3znysPqf+0= +github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28/go.mod h1:FslB+3eLbZgkuPWffqO1GeNzBFw1SuVqN2PXsMNe0Fg= github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.3.10 
h1:LR5QZX1VQd0DFWZfeCwWawyeKfpS/Tm1yjnJIY5X4Tw= github.com/hashicorp/raft v1.3.10/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4= -github.com/hashicorp/raft-autopilot v0.2.0 h1:2/R2RPgamgRKgNWGQioULZvjeKXQZmDuw5Ty+6c+H7Y= -github.com/hashicorp/raft-autopilot v0.2.0/go.mod h1:q6tZ8UAZ5xio2gv2JvjgmtOlh80M6ic8xQYBe2Egkg8= +github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I= +github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c h1:oiKun9QlrOz5yQxMZJ3tf1kWtFYuKSJzxzEDxDPevj4= github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c/go.mod h1:kiPs9g148eLShc2TYagUAyKDnD+dH9U+CQKsXzlY9xo= github.com/hashicorp/raft-snapshot v1.0.4 h1:EuDuayAJPdiDmVk1ygTDnG2zDzrs0/6/yBuma1IYSow= github.com/hashicorp/raft-snapshot v1.0.4/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hashicorp/vault-plugin-auth-alicloud v0.15.0 h1:R2SVwOeVLG5DXzUx42UWhjfFqS0Z9+ncfebPu+gO9VA= -github.com/hashicorp/vault-plugin-auth-alicloud v0.15.0/go.mod h1:YQXpa2s4rGYKm3Oa/Nkgh5SuGVfHFNEIUwDDYWyhloE= -github.com/hashicorp/vault-plugin-auth-azure v0.15.0 h1:OPK3rpRsWUQm/oo8l4N+YS7dka+lUHDT/qxTafSFPzY= -github.com/hashicorp/vault-plugin-auth-azure v0.15.0/go.mod h1:qRCibAYC0AV4s2+HxEwmLMPNLENK1kx2mrq9ldnGdkY= -github.com/hashicorp/vault-plugin-auth-centrify v0.15.1 h1:6StAr5tltpySNgyUwWC8czm9ZqkO7NIZfcRmxxtFwQ8= -github.com/hashicorp/vault-plugin-auth-centrify v0.15.1/go.mod h1:xXs4I5yLxbQ5VHcpvSxkRhShCTXd8Zyrni8qnFrfQ4Y= 
-github.com/hashicorp/vault-plugin-auth-cf v0.15.0 h1:zIVGlYXCRBY/ElucWdFC9xF27d2QMGMQPm9wSezGREI= -github.com/hashicorp/vault-plugin-auth-cf v0.15.0/go.mod h1:FEIjQkYmzno4MfU36MAjFUG9/JUWeMPxvBG5DRTMYVM= -github.com/hashicorp/vault-plugin-auth-gcp v0.16.0 h1:DA/ZDLCrUsbHS/7Xqkkw7l2SgbQE9rWEHLLWYTGu8rw= -github.com/hashicorp/vault-plugin-auth-gcp v0.16.0/go.mod h1:R0z/qdyxn0uq6hkKgux8KwenjV/n/CCaEz+qOF9GdPg= -github.com/hashicorp/vault-plugin-auth-jwt v0.16.0 h1:BUk03WDSGZuB+kEq3HTOQ7ecEH2Z1Idit42jfB5EnpE= -github.com/hashicorp/vault-plugin-auth-jwt v0.16.0/go.mod h1:Ve3r228afZOShwNvp+MGEKtm+ROskv10GG7bMXZb5OA= -github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0 h1:YH2x9kIV0jKXk22tVkpydhmPeEgprC7IOfN8l0pjF6c= -github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0/go.mod h1:I6ulXug4oxx77DFYjqI1kVl+72TgXEo3Oju4tTOVfU4= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0 h1:vuXNJvtMyoqQ01Sfwf2TNcJNkGcxP1vD3C7gpvuVkCU= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0/go.mod h1:onx9W/rDwENQkN+1yEnJvS51PVkkGAPOBXasne7lnnk= -github.com/hashicorp/vault-plugin-auth-oci v0.14.0 h1:B7uyigqgUAO3gebvi8mMmsq7l4QAG0bLEP6rAKyDVuw= -github.com/hashicorp/vault-plugin-auth-oci v0.14.0/go.mod h1:SYdTtQhzMxqOCbdC0E0UOrkc4eGXXcJmXXbe1MHVPtE= -github.com/hashicorp/vault-plugin-database-couchbase v0.9.2 h1:UWPWUADWUE08a3qeZixd/diIcNIm0NTqdPNTNbUljuQ= -github.com/hashicorp/vault-plugin-database-couchbase v0.9.2/go.mod h1:BvyZMbDEhvT4chbb7lgnL8xsVy9rF+hbDWuJ/eKkgpI= -github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.2 h1:N81xJfdVjAo49dUu5Wo95C0fv5scpbYL9z4ykWeHxJg= -github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.2/go.mod h1:P4cUbvtXgvfWZassvJzyXC4nIGRUO72ds9rE5WpQnuw= -github.com/hashicorp/vault-plugin-database-mongodbatlas v0.10.0 h1:fgsiuSq3AeFcYnbPkXOLSkKDrS2blaS/6MAmHEIAH28= -github.com/hashicorp/vault-plugin-database-mongodbatlas v0.10.0/go.mod h1:jH0OvjQ3Otg0HoOR5NugTqC3JA1KJ+J5OL0NdAzgSb4= 
-github.com/hashicorp/vault-plugin-database-redis v0.2.1 h1:E+UeZcpNtQO8nMfVebwE5ZS2sJpNjzbKwYJX1y8FFNk= -github.com/hashicorp/vault-plugin-database-redis v0.2.1/go.mod h1:T0i639Xnh2DY5ij8ofS83ZauBh8N0drKzqXYDrH87tM= -github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.1 h1:D8mdwkB6CyC37wkpdW9mgJNNrqral956bFoVj3AoQoE= -github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.1/go.mod h1:1RdJ0uxD8Mquzx9DBfoFKkmHgeZrPTN5nZHGyDrVCuY= -github.com/hashicorp/vault-plugin-database-snowflake v0.8.0 h1:Ec7gxxWIhxTmbKNXpmPgREra2go4H7QgDByIvtUwfFw= -github.com/hashicorp/vault-plugin-database-snowflake v0.8.0/go.mod h1:Qq2xNOGOOdHcX9Nr8H0CR4cNfanaGW3ANDrytpMjT1E= +github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/vault-plugin-auth-alicloud v0.13.0 h1:qQTzTw5qrnQurdE5nuXebltQ0JLJprxm5X8Lg4tCfd4= +github.com/hashicorp/vault-plugin-auth-alicloud v0.13.0/go.mod h1:UO140aqMmOpWVfot9kpowLHhbbJ1alBJBjctIxKtpkY= +github.com/hashicorp/vault-plugin-auth-azure v0.12.0 h1:hhL5qOgIvzIYwROp6Sgg4KNBJPu6bbIdKtdIKpZp2tE= +github.com/hashicorp/vault-plugin-auth-azure v0.12.0/go.mod h1:r0Wlg9xgS7dOx/H2rgkVgXS77m289ON291g+/txFdv8= +github.com/hashicorp/vault-plugin-auth-centrify v0.13.0 h1:IbtgJAY3EFyY+8n9A3QMn3MDGsvfQKDdH60r8G/C0nA= +github.com/hashicorp/vault-plugin-auth-centrify v0.13.0/go.mod h1:3fDbIVdwA/hkOVhwktKHDX5lo4DqIUUVbBdwQNNvxHw= +github.com/hashicorp/vault-plugin-auth-cf v0.13.0 h1:Iu4nRoZrkaLbW4vJ8t/wYS8z5BG4VQI7nKpBuwPTpOU= +github.com/hashicorp/vault-plugin-auth-cf v0.13.0/go.mod h1:Tktv1OXUjFobzjAU5qNJA8t1KC0109eu6Pcgm1uiwHg= +github.com/hashicorp/vault-plugin-auth-gcp v0.14.0 h1:9qjpx7fJcz7WoFiazIktaJSvuqscR2v/drc+euW8v2M= +github.com/hashicorp/vault-plugin-auth-gcp v0.14.0/go.mod h1:WNwaZN7NWy14xcy3otm1OXp5blcKgblUfvE16eYeUoQ= +github.com/hashicorp/vault-plugin-auth-jwt v0.14.2 
h1:1lMqkBb8evQqbC7sj+TP9bUd+MLRP6OrSl7xJX5N/30= +github.com/hashicorp/vault-plugin-auth-jwt v0.14.2/go.mod h1:76ygfLIPnzlEHC6/hbzl42B3a91jpMF776iHmX9dcKc= +github.com/hashicorp/vault-plugin-auth-kerberos v0.8.0 h1:5PiNahpVYFnQIg0Np3wLiFnfhHfnAHcWTl3VSzUVu/Y= +github.com/hashicorp/vault-plugin-auth-kerberos v0.8.0/go.mod h1:eqjae8tMBpAWgJNk1NjV/vtJYXQRZnYudUkBFowz3bY= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.14.1 h1:P5PR9MXiZVdDz0YnE2XAbsXmytFAu1SzEk2JrFO0gE0= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.14.1/go.mod h1:rouq4XoBoCzXtECtxGCWHS++g6Nzw2HOms6p6N+Uzkw= +github.com/hashicorp/vault-plugin-auth-oci v0.12.0 h1:7Tuj5q+rwyPm1aS1rsLg2TRo2QIrPTz1qNHGDkUvz18= +github.com/hashicorp/vault-plugin-auth-oci v0.12.0/go.mod h1:oj2gh7qH2VzjelFeul8FzDmmYrJXnCuLUUeQAA6fMN8= +github.com/hashicorp/vault-plugin-database-couchbase v0.8.0 h1:lDZ1OazKfSPIb1DXLbq7NCf1BZwB1cFN3OG3NedXB/s= +github.com/hashicorp/vault-plugin-database-couchbase v0.8.0/go.mod h1:skmG6MgIG6fjIOlOEgVKOcNlr1PcgHPUb9q1YQ5+Q9k= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.12.0 h1:g+jD35qUZlDcS2YWQBqXbfpMNBTvGEvRzSYjwLgWOK4= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.12.0/go.mod h1:wO8EPQs5bsBERD6MSQ+7Az+YJ4TFclCNxBo3r3VKeao= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0 h1:wx/9Dh9YGGU7GiijwRfwPFBlWdmBEdf6n2VhgTdRtJU= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0/go.mod h1:eWwd1Ba7aLU1tIAtmFsEhu9E023jkkypHawxhnAbZfc= +github.com/hashicorp/vault-plugin-database-redis v0.1.0 h1:fDT32ZphGdvVdenvieWb+ZjWmCOHFtZ1Qjv581BloHw= +github.com/hashicorp/vault-plugin-database-redis v0.1.0/go.mod h1:bzrD2dQUClKcl89yYsaZqboFDEzst+TpXROWuhVxLEM= +github.com/hashicorp/vault-plugin-database-redis-elasticache v0.1.0 h1:qwDcp1vdlT0Io0x5YjtvhXtndfQB66jnDICg7NHxKQk= +github.com/hashicorp/vault-plugin-database-redis-elasticache v0.1.0/go.mod h1:gB/SMtnIf0NdDyPSIo0KgSNp1ajTvLDiwP+lIAy8uHs= 
+github.com/hashicorp/vault-plugin-database-snowflake v0.6.1 h1:jnWNKqRmRXNpXOC4FvAOXHxPKAmDZRVS+d5fcbRQ/Xw= +github.com/hashicorp/vault-plugin-database-snowflake v0.6.1/go.mod h1:QJ8IL/Qlu4Me1KkL0OpaWO7aMFL0TNoSEKVB5F+lCiM= github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0= github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM= -github.com/hashicorp/vault-plugin-secrets-ad v0.16.0 h1:6RCpd2PbBvmi5xmxXhggE0Xv+/Gag896/NNZeMKH+8A= -github.com/hashicorp/vault-plugin-secrets-ad v0.16.0/go.mod h1:6IeXly3xi+dVodzFSx6aVZjdhd3syboPyhxr1/WMcyo= -github.com/hashicorp/vault-plugin-secrets-alicloud v0.15.0 h1:uVpcx2s3PwYXSOHmjA/Ai6+V0c3wgvSApELZez8b9mI= -github.com/hashicorp/vault-plugin-secrets-alicloud v0.15.0/go.mod h1:wMTkhPGxDa2PCdSBqd6A8SMcRrltu3NRbwX8m8W1MCU= -github.com/hashicorp/vault-plugin-secrets-azure v0.16.0 h1:4Y2LG2P6XUy4HLlObJtHiveJBQwZ4kazs0EpxDmAal0= -github.com/hashicorp/vault-plugin-secrets-azure v0.16.0/go.mod h1:tNzshPyCxkuOL4PLF3cybN/XaSlWgvfl6dwEbCARybY= -github.com/hashicorp/vault-plugin-secrets-gcp v0.16.0 h1:5ozLtt38Bw/DLt37dbccT8j56A+2T7CWFfYecKleGl4= -github.com/hashicorp/vault-plugin-secrets-gcp v0.16.0/go.mod h1:Ax9/ALmpzyjU8mcqHVYR9lwjcyazdmimrShDYeK9CHc= -github.com/hashicorp/vault-plugin-secrets-gcpkms v0.15.0 h1:CueteKXEuO52qGu1nUaDc/euSTSfQD9MONkXuvWdZQw= -github.com/hashicorp/vault-plugin-secrets-gcpkms v0.15.0/go.mod h1:a0Z2DVGd2SPPwLb8edXeHyer3CXei/Y0cb7EFkiFMfA= -github.com/hashicorp/vault-plugin-secrets-kubernetes v0.5.0 h1:g0W1ybHjO945jDtuDEFcqTINyW/s06wxZarE/7aLumc= -github.com/hashicorp/vault-plugin-secrets-kubernetes v0.5.0/go.mod h1:2wobeIypBESGQYmhv12vuAorCvfETHpBoMyrb+6QTmQ= -github.com/hashicorp/vault-plugin-secrets-kv v0.15.0 h1:S2d1t4m4ilDNJRdMUzNUimvyu/+ll8huq5QncVgYz+s= -github.com/hashicorp/vault-plugin-secrets-kv v0.15.0/go.mod h1:xu/eiT+BB2b2Gh/AZFJ1xCS8E7S29gOQcuh9VMxros8= -github.com/hashicorp/vault-plugin-secrets-mongodbatlas 
v0.10.0 h1:FB860wKclwLBvBHkQb5nq8bGMUAsuw0khrYT1RM0NR0= -github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.10.0/go.mod h1:6YPhFm57C3DvPueHEGdTLK94g3gZI/gdiRrSwO5Fym8= -github.com/hashicorp/vault-plugin-secrets-openldap v0.11.0 h1:8J8u7uWLifj3uF5tot9Qj74H8vEwPMNKN+XTLLgSmDw= -github.com/hashicorp/vault-plugin-secrets-openldap v0.11.0/go.mod h1:JVulYJNiG7s3pjwo9HAnq07ViWtGWkz2WAw8ytle+0w= -github.com/hashicorp/vault-plugin-secrets-terraform v0.7.1 h1:Icb3EDpNvb4ltnGff2Zrm3JVNDDdbbL2wdA2LouD2KQ= -github.com/hashicorp/vault-plugin-secrets-terraform v0.7.1/go.mod h1:JHHo1nWOgYPsbTqE/PVwkTKRkLSlPSqo9RBqZ7NLKB8= -github.com/hashicorp/vault-testing-stepwise v0.1.3 h1:GYvm98EB4nUKUntkBcLicnKsebeV89KPHmAGJUCPU/c= -github.com/hashicorp/vault-testing-stepwise v0.1.3/go.mod h1:Ym1T/kMM2sT6qgCIIJ3an7uaSWCJ8O7ohsWB9UiB5tI= -github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 h1:Y/+BtwxmRak3Us9jrByARvYW6uNeqZlEpMylIdXVIjY= -github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77/go.mod h1:a2crHoMWwY6aiL8GWT8hYj7vKD64uX0EdRPbnsHF5wU= +github.com/hashicorp/vault-plugin-secrets-ad v0.14.1 h1:XAkEJ9iFJoBwYFYv3wTcdVFq7DejwXARybSui/LG42Y= +github.com/hashicorp/vault-plugin-secrets-ad v0.14.1/go.mod h1:5XIn6cw1+gG+WWxK0SdEAKCDOXTp+MX90PzZ7f3Eks0= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.13.0 h1:eWDAAvZsKHhnXF8uCiyF/wDqT57fflCs54PTIolONBo= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.13.0/go.mod h1:F4KWrlCQZbhP2dFXCkRvbHX2J6CTydlaY0cH+OrLHCE= +github.com/hashicorp/vault-plugin-secrets-azure v0.14.1 h1:Dh4b7sILU+O17K+g2tWGdeGq9djPw5rqFCOX69JxJbA= +github.com/hashicorp/vault-plugin-secrets-azure v0.14.1/go.mod h1:Xw8CQPkyZSJRK9BXKBruf6kOO8rLyXEf40o19ClK9kY= +github.com/hashicorp/vault-plugin-secrets-gcp v0.14.1 h1:e9uQuKQ4hJWwa19IUSNMolhT0BczyoDryMN83gr3dhY= +github.com/hashicorp/vault-plugin-secrets-gcp v0.14.1/go.mod h1:kRgZfXRD9qUHoGclaR09tKXXwkwNrkY+D76ahetsaB8= 
+github.com/hashicorp/vault-plugin-secrets-gcpkms v0.13.0 h1:R36pNaaN4tJyIrPJej7/355Qt5+Q5XUTB+Az6rGs5xg= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.13.0/go.mod h1:n2VKlYDCuO8+OXN4S1Im8esIL53/ENRFa4gXrvhCVIM= +github.com/hashicorp/vault-plugin-secrets-kubernetes v0.2.0 h1:iPue19f7LW63lAo8YFsm0jmo49gox0oIYFPAtVtnzGg= +github.com/hashicorp/vault-plugin-secrets-kubernetes v0.2.0/go.mod h1:WO0wUxGh1PxhwdBHD7mXU5XQTqLwMZiJrUwVuzx3tIg= +github.com/hashicorp/vault-plugin-secrets-kv v0.13.3 h1:TUKpQY6fmTiUhaZ71/WTamEuK2JH7ESB92T4VZsq4+g= +github.com/hashicorp/vault-plugin-secrets-kv v0.13.3/go.mod h1:ikPuEWi2rHaGQCHZuPdn/6D3Bq/25ElX3G9pGeDr0Yg= +github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.8.0 h1:VREm+cJGUXcPCakaYVxQt8wTVqTwJclsIIk2XuqpPbs= +github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.8.0/go.mod h1:PLx2vxXukfsKsDRo/PlG4fxmJ1d+H2h82wT3vf4buuI= +github.com/hashicorp/vault-plugin-secrets-openldap v0.9.1 h1:qgxKfXQ2WaBohjBr0m4EWYNQJTBO6dkhtqJJZ372YQw= +github.com/hashicorp/vault-plugin-secrets-openldap v0.9.1/go.mod h1:o7mF9tWgDkAD5OvvXWM3bOCqN+n/cCpaMm1CrEUZkHc= +github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0 h1:N5s1ojXyG8gBZlx6BdqE04LviR0rw4vX1dDDMdnEzX8= +github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0/go.mod h1:GzYAJYytgbNNyT3S7rspz1cLE53E1oajFbEtaDUlVGU= +github.com/hashicorp/vault-testing-stepwise v0.1.1/go.mod h1:3vUYn6D0ZadvstNO3YQQlIcp7u1a19MdoOC0NQ0yaOE= +github.com/hashicorp/vault-testing-stepwise v0.1.2 h1:3obC/ziAPGnsz2IQxr5e4Ayb7tu7WL6pm6mmZ5gwhhs= +github.com/hashicorp/vault-testing-stepwise v0.1.2/go.mod h1:TeU6B+5NqxUjto+Zey+QQEH1iywuHn0ciHZNYh4q3uI= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= 
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -1938,18 +1167,13 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod 
h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb v1.7.6/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= -github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= -github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -2007,12 +1231,12 @@ github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFK github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= -github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= -github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= 
+github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= -github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 h1:mex1izRBCD+7WjieGgRdy7e651vD/lvB1bD9vNE/3K4= @@ -2035,7 +1259,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= @@ -2057,70 +1280,49 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f h1:Gsc9mVHLRqBjMgdQCghN9NObCcRncDqxJvBvEaIIQEo= +github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress 
v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= -github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= -github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ= -github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= -github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= -github.com/lestrrat-go/jwx v1.2.25/go.mod 
h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY= -github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -2129,10 +1331,7 @@ github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZi github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson 
v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -2140,9 +1339,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= @@ -2153,9 +1351,8 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod 
h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -2166,54 +1363,37 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/mediocregopher/radix/v4 v4.1.2 h1:Pj7XnNK5WuzzFy63g98pnccainAePK+aZNQRvxSvj2I= -github.com/mediocregopher/radix/v4 v4.1.2/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE= +github.com/mediocregopher/radix/v4 v4.1.1 h1:JkZBEp0y8pWGNZkmO3RR5oEO5huwd4zKKt4rh1C+P8s= +github.com/mediocregopher/radix/v4 v4.1.1/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE= github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo= github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4= github.com/michaelklishin/rabbit-hole/v2 v2.12.0 h1:946p6jOYFcVJdtBBX8MwXvuBkpPjwm1Nm2Qg8oX+uFk= github.com/michaelklishin/rabbit-hole/v2 v2.12.0/go.mod h1:AN/3zyz7d++OHf+4WUo/LR0+Q5nlPHMaXasIsG/mPY0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= -github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.1.1/go.mod 
h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a h1:eU8j/ClY2Ty3qdHnn0TyW3ivFoPC/0F1gQZz8yTxbbE= github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a/go.mod h1:v8eSC2SMp9/7FTKUncp7fH9IwPfw+ysMObcEz5FWheQ= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod 
h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -2222,10 +1402,8 @@ github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJ github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -2236,32 +1414,25 @@ github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= -github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= +github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/mitchellh/reflectwalk v1.0.0/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= -github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= -github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= 
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -2270,19 +1441,13 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/mongodb-forks/digest v1.0.4 h1:9FrGTc7MGAchgaQBcXBnEwUM/Oo8obW7OGWxnsSvZ64= -github.com/mongodb-forks/digest v1.0.4/go.mod h1:eHRfgovT+dvSFfltrOa27hy1oR/rcwyDdp5H1ZQxEMA= +github.com/mongodb-forks/digest v1.0.3 h1:ZUK1vyZnBiRMvET0O1SzmnBmv935CkcOTjhfR4zIQ2s= +github.com/mongodb-forks/digest v1.0.3/go.mod h1:eHRfgovT+dvSFfltrOa27hy1oR/rcwyDdp5H1ZQxEMA= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod 
h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= -github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= -github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -2291,7 +1456,6 @@ github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc h1:7xGrl4tTpBQu5Z github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc/go.mod h1:1rLVY/DWf3U6vSZgH16S7pymfrhK2lcUlXjgGglw/lY= github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= github.com/nicolai86/scaleway-sdk 
v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -2309,7 +1473,6 @@ github.com/okta/okta-sdk-golang/v2 v2.12.1 h1:U+smE7trkHSZO8Mval3Ow85dbxawO+pMAr github.com/okta/okta-sdk-golang/v2 v2.12.1/go.mod h1:KRoAArk1H216oiRnQT77UN6JAhBOnOWkK27yA1SM7FQ= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -2319,19 +1482,10 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod 
h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= -github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= -github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= -github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -2342,18 +1496,8 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= -github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/onsi/gomega v1.24.0/go.mod 
h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= -github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk= -github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= -github.com/open-policy-agent/opa v0.42.2/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -2362,53 +1506,39 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= 
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= -github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg= github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= -github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= -github.com/opencontainers/runc v1.1.6 h1:XbhB8IfG/EsnhNvZtNdLB0GBw92GYEFvKlhaJk9jUgA= -github.com/opencontainers/runc v1.1.6/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 
-github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= -github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= -github.com/oracle/oci-go-sdk v24.3.0+incompatible h1:x4mcfb4agelf1O4/1/auGlZ1lr97jXRSSN5MxTgG/zU= -github.com/oracle/oci-go-sdk v24.3.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/openlyinc/pointy v1.1.2 h1:LywVV2BWC5Sp5v7FoP4bUD+2Yn5k0VNeRbU5vq9jUMY= +github.com/openlyinc/pointy v1.1.2/go.mod 
h1:w2Sytx+0FVuMKn37xpXIAyBNhFNBIJGR/v2m7ik1WtM= +github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/oracle/oci-go-sdk v13.1.0+incompatible h1:inwbT0b/mMbnTfzYoW2xcU1cCMIlU6Fz973at5phRXM= +github.com/oracle/oci-go-sdk v13.1.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/oracle/oci-go-sdk/v60 v60.0.0 h1:EJAWjEi4SY5Raha6iUzq4LTQ0uM5YFw/wat/L1ehIEM= github.com/oracle/oci-go-sdk/v60 v60.0.0/go.mod h1:krz+2gkSzlSL/L4PvP0Z9pZpag9HYLNtsMd1PmxlA2w= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= -github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= -github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= +github.com/ory/dockertest/v3 v3.8.0/go.mod h1:9zPATATlWQru+ynXP+DytBQrsXV7Tmlx7K86H6fQaDo= +github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= +github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= @@ -2423,38 +1553,28 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= 
-github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4= github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.16/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= -github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pires/go-proxyproto v0.6.1 h1:EBupykFmo22SDjv4fQVQd2J9NOoLPmyZA/15ldOGkPw= github.com/pires/go-proxyproto v0.6.1/go.mod 
h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -2468,7 +1588,6 @@ github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8 github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d h1:PinQItctnaL2LtkaSM678+ZLLy5TajwOeXzWvYC7tII= github.com/pquerna/otp 
v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -2478,19 +1597,14 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -2499,11 +1613,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -2515,15 +1626,11 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU= github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -2533,53 +1640,41 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 
h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/safchain/ethtool v0.2.0/go.mod h1:WkKB1DnNtvsMlDmQ50sgwowDJV/hGbJSOvJoEXs1AJQ= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod 
h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-limiter v0.7.1 h1:wWNhTj0pxjyJ7wuJHpRJpYwJn+bUnjYfw2a85eu5w9U= github.com/sethvargo/go-limiter v0.7.1/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= github.com/shirou/gopsutil/v3 v3.22.6 h1:FnHOFOh+cYAM0C30P+zysPISzlknLC5Z1G4EAElznfQ= github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -2592,41 +1687,27 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 
h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.6.18 h1:mm4KYvp3LWGHIuACwX/tHv9qDs2NdLDXuK0Rep+vfJc= -github.com/snowflakedb/gosnowflake v1.6.18/go.mod h1:BhNDWNSUY+t4T8GBuOg3ckWC4v5hhGlLovqGcF8Rkac= +github.com/snowflakedb/gosnowflake v1.6.3 h1:EJDdDi74YbYt1ty164ge3fMZ0eVZ6KA7b1zmAa/wnRo= +github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0= github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= -github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -2636,7 +1717,6 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo= @@ -2645,10 +1725,8 @@ github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -2659,79 +1737,54 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= 
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 h1:8fDzz4GuVg4skjY2B0nMN7h6uN61EDVkuLyI2+qGHhI= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tilinna/clock v1.0.2 h1:6BO2tyAC9JbPExKH/z9zl44FLu1lImh3nDNKA0kgrkI= github.com/tilinna/clock v1.0.2/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= -github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= -github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= 
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= -github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= -github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= -github.com/ugorji/go/codec v1.1.7/go.mod 
h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= -github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= -github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns 
v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= 
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -2746,7 +1799,7 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofm github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/yhat/scrape v0.0.0-20161128144610-24b7890b0945/go.mod h1:4vRFPPNYllgCacoj+0FoKOjTW68rUhEfqPLiEJaK2w8= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -2764,42 +1817,24 @@ github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zenazn/goji v0.9.0/go.mod 
h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= -go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= -go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= -go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= -go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= +go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672 h1:19vOZe7geDEympjWIVidGi6/psR5Y+aaKnF17PSpdXA= +go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672/go.mod h1:wSVAyLiSU4JOBlqGr29lZeKbllk31oCAXAdTa6MioWQ= +go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= 
-go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4= +go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= -go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= -go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc= -go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -go.mongodb.org/atlas v0.28.0 h1:CelAXtmiM36tdifSDwWdDH1nNbdvq0M2XfUR8208JxA= -go.mongodb.org/atlas v0.28.0/go.mod h1:L4BKwVx/OeEhOVjCSdgo90KJm4469iv7ZLzQms/EPTg= +go.mongodb.org/atlas v0.13.0/go.mod h1:wVCnHcm/7/IfTjEB6K8K35PLG70yGz8BdkRwX0oK9/M= +go.mongodb.org/atlas v0.15.0 h1:YyOBdBIuI//krRITf4r7PSirJ3YDNNUfNmapxwSyDow= +go.mongodb.org/atlas v0.15.0/go.mod h1:lQhRHIxc6jQHEK3/q9WLu/SdBkPj2fQYhjLGUF6Z3U8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= @@ -2807,8 +1842,8 @@ go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= 
go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.11.6 h1:XM7G6PjiGAO5betLF13BIa5TlLUUE3uJ/2Ox3Lz1K+o= -go.mongodb.org/mongo-driver v1.11.6/go.mod h1:G9TgswdsWjX4tmDA5zfs2+6AEPpYJwqblyjsfuh8oXY= +go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -2816,86 +1851,30 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0/go.mod h1:UMklln0+MRhZC4e3PwmN3pCtq4DyIadWw4yikh6bNrw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= -go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= -go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod 
h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4= +go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= 
+go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= -go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= -go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= -go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= -go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= -go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= -go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= go.opentelemetry.io/proto/otlp 
v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= -go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= -go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -2907,7 +1886,6 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -2927,75 +1905,41 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
-golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp 
v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= -golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE= -golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -3007,6 +1951,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -3019,15 +1964,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ 
-3066,6 +2005,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -3088,16 +2028,17 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -3105,22 +2046,11 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net 
v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -3141,18 +2071,8 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod 
h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= -golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 h1:zwrSfklXn0gxyLRX/aR+q6cgHbV/ItVyzbPlbA+dkAw= +golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -3167,11 +2087,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -3237,17 +2154,21 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -3266,17 +2187,13 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -3288,68 +2205,43 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025112917-711f33c9992c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.8.0 
h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -3360,12 +2252,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -3374,22 +2261,15 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools 
v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -3415,12 +2295,10 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -3440,6 +2318,8 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200409170454-77362c5149f0/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200416214402-fc959738d646/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -3453,31 +2333,22 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= 
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -3485,19 +2356,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -3511,11 +2371,13 @@ google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= @@ -3536,27 +2398,9 @@ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/S google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= 
-google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.124.0 h1:dP6Ef1VgOGqQ8eiv4GiY8RhmeyqzovcXBYPDUYG8Syo= -google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= +google.golang.org/api v0.83.0 h1:pMvST+6v+46Gabac4zlJlalxZjCeRcepwg2EdBU+nCc= +google.golang.org/api v0.83.0/go.mod h1:CNywQoj/AfhTw26ZWAa6LwOv+6WFxHmeLPZq2uncLZk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3591,7 +2435,8 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200416231807-8751e049a2a0/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -3609,13 +2454,10 @@ google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -3638,7 +2480,6 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= @@ -3646,63 +2487,14 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto 
v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= 
-google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod 
h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto 
v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= -google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230525154841-bd750badd5c6 h1:62QuyPXKEkZpjZesyj5K5jABl6MnSnWl+vNuT5oz90E= -google.golang.org/genproto v0.0.0-20230525154841-bd750badd5c6/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 h1:qRu95HZ148xXw+XeZ3dvqe85PxH4X8+jIo0iRPKcEnM= +google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -3718,6 +2510,7 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -3736,21 +2529,12 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD 
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -3767,9 +2551,8 @@ 
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -3777,21 +2560,24 @@ gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= 
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/ory-am/dockertest.v3 v3.3.4 h1:oen8RiwxVNxtQ1pRoV4e4jqh6UjNsOuIZ1NXns6jdcw= gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= @@ -3827,9 +2613,8 @@ gotest.tools/gotestsum v1.9.0 h1:Jbo/0k/sIOXIJu51IZxEAt27n77xspFEfL6SqKUR72A= gotest.tools/gotestsum v1.9.0/go.mod h1:6JHCiN6TEjA7Kaz23q1bH0e2Dc3YJjDUZ0DmctFZf+w= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= gotest.tools/v3 v3.3.0/go.mod 
h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -3837,59 +2622,39 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= -honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= -k8s.io/api v0.27.2 h1:+H17AJpUMvl+clT+BPnKf0E3ksMAzoBBg7CntpSuADo= -k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4= +k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= +k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.20.1/go.mod 
h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= -k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= -k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= -k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg= -k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= +k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= -k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8= k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= -k8s.io/client-go v0.27.2 h1:vDLSeuYvCHKeoQRhCXjxXO45nHVv2Ip4Fe0MfioMrhE= -k8s.io/client-go v0.27.2/go.mod h1:tY0gVmUsHrAmjzHX9zs7eCjxcBsf8IiNe7KQ52biTcQ= +k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= 
+k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= -k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= -k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= -k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc= -k8s.io/cri-api v0.25.3/go.mod h1:riC/P0yOGUf2K1735wW+CXs1aY2ctBgePtnnoFLd0dU= -k8s.io/cri-api v0.26.2/go.mod h1:Oo8O7MKFPNDxfDf2LmrF/3Hf30q1C6iliGuv3la3tIA= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= @@ -3897,93 
+2662,35 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.26.2/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg= +k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= +k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= k8s.io/kubernetes v1.13.0/go.mod 
h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk= -k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= -modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod 
h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= -modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= -modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= -modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= -modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= -modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/strutil 
v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= -mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v3 
v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/helper/benchhelpers/benchhelpers.go b/helper/benchhelpers/benchhelpers.go index 06dcde604e618..9c0feac15c83d 100644 --- a/helper/benchhelpers/benchhelpers.go +++ b/helper/benchhelpers/benchhelpers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package benchhelpers import ( diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go index b4d3da5937f01..9bd5c7c0289c9 100644 --- a/helper/builtinplugins/registry.go +++ b/helper/builtinplugins/registry.go @@ -1,11 +1,6 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package builtinplugins import ( - "context" - credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud" credAzure "github.com/hashicorp/vault-plugin-auth-azure" credCentrify "github.com/hashicorp/vault-plugin-auth-centrify" @@ -31,6 +26,7 @@ import ( logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas" logicalLDAP "github.com/hashicorp/vault-plugin-secrets-openldap" logicalTerraform "github.com/hashicorp/vault-plugin-secrets-terraform" + credAppId "github.com/hashicorp/vault/builtin/credential/app-id" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" credAws "github.com/hashicorp/vault/builtin/credential/aws" credCert "github.com/hashicorp/vault/builtin/credential/cert" @@ -40,9 +36,14 @@ import ( credRadius "github.com/hashicorp/vault/builtin/credential/radius" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" logicalAws "github.com/hashicorp/vault/builtin/logical/aws" + logicalCass "github.com/hashicorp/vault/builtin/logical/cassandra" logicalConsul "github.com/hashicorp/vault/builtin/logical/consul" + logicalMongo "github.com/hashicorp/vault/builtin/logical/mongodb" + logicalMssql "github.com/hashicorp/vault/builtin/logical/mssql" + logicalMysql "github.com/hashicorp/vault/builtin/logical/mysql" logicalNomad "github.com/hashicorp/vault/builtin/logical/nomad" logicalPki "github.com/hashicorp/vault/builtin/logical/pki" + logicalPostgres "github.com/hashicorp/vault/builtin/logical/postgresql" logicalRabbit "github.com/hashicorp/vault/builtin/logical/rabbitmq" logicalSsh "github.com/hashicorp/vault/builtin/logical/ssh" logicalTotp "github.com/hashicorp/vault/builtin/logical/totp" @@ -55,7 +56,6 @@ import ( dbMysql "github.com/hashicorp/vault/plugins/database/mysql" dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql" dbRedshift "github.com/hashicorp/vault/plugins/database/redshift" - "github.com/hashicorp/vault/sdk/framework" 
"github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" ) @@ -86,23 +86,13 @@ type logicalBackend struct { consts.DeprecationStatus } -type removedBackend struct { - *framework.Backend -} - -func removedFactory(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - removedBackend := &removedBackend{} - removedBackend.Backend = &framework.Backend{} - return removedBackend, nil -} - func newRegistry() *registry { reg := ®istry{ credentialBackends: map[string]credentialBackend{ "alicloud": {Factory: credAliCloud.Factory}, "app-id": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, + Factory: credAppId.Factory, + DeprecationStatus: consts.PendingRemoval, }, "approle": {Factory: credAppRole.Factory}, "aws": {Factory: credAws.Factory}, @@ -149,16 +139,13 @@ func newRegistry() *registry { "snowflake-database-plugin": {Factory: dbSnowflake.New}, }, logicalBackends: map[string]logicalBackend{ - "ad": { - Factory: logicalAd.Factory, - DeprecationStatus: consts.Deprecated, - }, + "ad": {Factory: logicalAd.Factory}, "alicloud": {Factory: logicalAlicloud.Factory}, "aws": {Factory: logicalAws.Factory}, "azure": {Factory: logicalAzure.Factory}, "cassandra": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, + Factory: logicalCass.Factory, + DeprecationStatus: consts.PendingRemoval, }, "consul": {Factory: logicalConsul.Factory}, "gcp": {Factory: logicalGcp.Factory}, @@ -166,27 +153,25 @@ func newRegistry() *registry { "kubernetes": {Factory: logicalKube.Factory}, "kv": {Factory: logicalKv.Factory}, "mongodb": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, + Factory: logicalMongo.Factory, + DeprecationStatus: consts.PendingRemoval, }, - // The mongodbatlas secrets engine is not the same as the database plugin equivalent - // (`mongodbatlas-database-plugin`), and thus will not be deprecated at this time. 
"mongodbatlas": {Factory: logicalMongoAtlas.Factory}, "mssql": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, + Factory: logicalMssql.Factory, + DeprecationStatus: consts.PendingRemoval, }, "mysql": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, + Factory: logicalMysql.Factory, + DeprecationStatus: consts.PendingRemoval, }, "nomad": {Factory: logicalNomad.Factory}, "openldap": {Factory: logicalLDAP.Factory}, "ldap": {Factory: logicalLDAP.Factory}, "pki": {Factory: logicalPki.Factory}, "postgresql": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, + Factory: logicalPostgres.Factory, + DeprecationStatus: consts.PendingRemoval, }, "rabbitmq": {Factory: logicalRabbit.Factory}, "ssh": {Factory: logicalSsh.Factory}, @@ -237,16 +222,16 @@ func (r *registry) Keys(pluginType consts.PluginType) []string { var keys []string switch pluginType { case consts.PluginTypeDatabase: - for key, backend := range r.databasePlugins { - keys = appendIfNotRemoved(keys, key, backend.DeprecationStatus) + for key := range r.databasePlugins { + keys = append(keys, key) } case consts.PluginTypeCredential: - for key, backend := range r.credentialBackends { - keys = appendIfNotRemoved(keys, key, backend.DeprecationStatus) + for key := range r.credentialBackends { + keys = append(keys, key) } case consts.PluginTypeSecrets: - for key, backend := range r.logicalBackends { - keys = appendIfNotRemoved(keys, key, backend.DeprecationStatus) + for key := range r.logicalBackends { + keys = append(keys, key) } } return keys @@ -288,10 +273,3 @@ func toFunc(ifc interface{}) func() (interface{}, error) { return ifc, nil } } - -func appendIfNotRemoved(keys []string, name string, status consts.DeprecationStatus) []string { - if status != consts.Removed { - return append(keys, name) - } - return keys -} diff --git a/helper/builtinplugins/registry_test.go b/helper/builtinplugins/registry_test.go index cfaec51814477..5e63ba3e701f3 100644 --- 
a/helper/builtinplugins/registry_test.go +++ b/helper/builtinplugins/registry_test.go @@ -1,21 +1,12 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package builtinplugins import ( - "bufio" - "fmt" - "os" "reflect" - "regexp" "testing" - credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + credAppId "github.com/hashicorp/vault/builtin/credential/app-id" dbMysql "github.com/hashicorp/vault/plugins/database/mysql" "github.com/hashicorp/vault/sdk/helper/consts" - - "golang.org/x/exp/slices" ) // Test_RegistryGet exercises the (registry).Get functionality by comparing @@ -44,16 +35,9 @@ func Test_RegistryGet(t *testing.T) { }, { name: "known builtin lookup", - builtin: "userpass", - pluginType: consts.PluginTypeCredential, - want: toFunc(credUserpass.Factory), - wantOk: true, - }, - { - name: "removed builtin lookup", builtin: "app-id", pluginType: consts.PluginTypeCredential, - want: nil, + want: toFunc(credAppId.Factory), wantOk: true, }, { @@ -97,7 +81,7 @@ func Test_RegistryKeyCounts(t *testing.T) { { name: "number of auth plugins", pluginType: consts.PluginTypeCredential, - want: 19, + want: 20, }, { name: "number of database plugins", @@ -107,7 +91,7 @@ func Test_RegistryKeyCounts(t *testing.T) { { name: "number of secrets plugins", pluginType: consts.PluginTypeSecrets, - want: 19, + want: 24, }, } for _, tt := range tests { @@ -142,15 +126,9 @@ func Test_RegistryContains(t *testing.T) { }, { name: "known builtin lookup", - builtin: "approle", - pluginType: consts.PluginTypeCredential, - want: true, - }, - { - name: "removed builtin lookup", builtin: "app-id", pluginType: consts.PluginTypeCredential, - want: false, + want: true, }, } for _, tt := range tests { @@ -208,10 +186,10 @@ func Test_RegistryStatus(t *testing.T) { wantOk: true, }, { - name: "removed builtin lookup", + name: "pending removal builtin lookup", builtin: "app-id", pluginType: consts.PluginTypeCredential, - want: consts.Removed, + want: 
consts.PendingRemoval, wantOk: true, }, } @@ -227,108 +205,3 @@ func Test_RegistryStatus(t *testing.T) { }) } } - -// Test_RegistryMatchesGenOpenapi ensures that the plugins mounted in gen_openapi.sh match registry.go -func Test_RegistryMatchesGenOpenapi(t *testing.T) { - const scriptPath = "../../scripts/gen_openapi.sh" - - // parseScript fetches the contents of gen_openapi.sh script & extract the relevant lines - parseScript := func(path string) ([]string, []string, error) { - f, err := os.Open(scriptPath) - if err != nil { - return nil, nil, fmt.Errorf("could not open gen_openapi.sh script: %w", err) - } - defer f.Close() - - var ( - credentialBackends []string - credentialBackendsRe = regexp.MustCompile(`^vault auth enable (?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) - - secretsBackends []string - secretsBackendsRe = regexp.MustCompile(`^vault secrets enable (?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) - ) - - scanner := bufio.NewScanner(f) - - for scanner.Scan() { - line := scanner.Text() - - if m := credentialBackendsRe.FindStringSubmatch(line); m != nil { - credentialBackends = append(credentialBackends, m[1]) - } - if m := secretsBackendsRe.FindStringSubmatch(line); m != nil { - secretsBackends = append(secretsBackends, m[1]) - } - } - - if err := scanner.Err(); err != nil { - return nil, nil, fmt.Errorf("error scanning gen_openapi.sh: %v", err) - } - - return credentialBackends, secretsBackends, nil - } - - // ensureInRegistry ensures that the given plugin is in registry and marked as "supported" - ensureInRegistry := func(t *testing.T, name string, pluginType consts.PluginType) { - t.Helper() - - // "database" will not be present in registry, it is represented as - // a list of database plugins instead - if name == "database" && pluginType == consts.PluginTypeSecrets { - return - } - - deprecationStatus, ok := Registry.DeprecationStatus(name, pluginType) - if !ok { - t.Fatalf("%q %s backend is missing from registry.go; please remove it from gen_openapi.sh", name, pluginType) - } 
- - if deprecationStatus == consts.Removed { - t.Fatalf("%q %s backend is marked 'removed' in registry.go; please remove it from gen_openapi.sh", name, pluginType) - } - } - - // ensureInScript ensures that the given plugin name in in gen_openapi.sh script - ensureInScript := func(t *testing.T, scriptBackends []string, name string) { - t.Helper() - - for _, excluded := range []string{ - "oidc", // alias for "jwt" - "openldap", // alias for "ldap" - } { - if name == excluded { - return - } - } - - if !slices.Contains(scriptBackends, name) { - t.Fatalf("%q backend could not be found in gen_openapi.sh, please add it there", name) - } - } - - // test starts here - scriptCredentialBackends, scriptSecretsBackends, err := parseScript(scriptPath) - if err != nil { - t.Fatal(err) - } - - for _, name := range scriptCredentialBackends { - ensureInRegistry(t, name, consts.PluginTypeCredential) - } - - for _, name := range scriptSecretsBackends { - ensureInRegistry(t, name, consts.PluginTypeSecrets) - } - - for name, backend := range Registry.credentialBackends { - if backend.DeprecationStatus == consts.Supported { - ensureInScript(t, scriptCredentialBackends, name) - } - } - - for name, backend := range Registry.logicalBackends { - if backend.DeprecationStatus == consts.Supported { - ensureInScript(t, scriptSecretsBackends, name) - } - } -} diff --git a/helper/constants/constants_oss.go b/helper/constants/constants_oss.go index 8675f7030658a..22b9928fd3c41 100644 --- a/helper/constants/constants_oss.go +++ b/helper/constants/constants_oss.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !enterprise package constants diff --git a/helper/constants/fips.go b/helper/constants/fips.go index 9632d014a5496..2a9f7ee7aae6b 100644 --- a/helper/constants/fips.go +++ b/helper/constants/fips.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - //go:build !fips package constants diff --git a/helper/constants/fips_build_check.go b/helper/constants/fips_build_check.go index 10e07e583f98e..1e865b499f62e 100644 --- a/helper/constants/fips_build_check.go +++ b/helper/constants/fips_build_check.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build (!fips && (fips_140_2 || fips_140_3)) || (fips && !fips_140_2 && !fips_140_3) || (fips_140_2 && fips_140_3) package constants diff --git a/helper/constants/fips_cgo_check.go b/helper/constants/fips_cgo_check.go index 6de7d9f0d0315..56eabb6c81e5f 100644 --- a/helper/constants/fips_cgo_check.go +++ b/helper/constants/fips_cgo_check.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build (fips || fips_140_2 || fips_140_3) && !cgo package constants diff --git a/helper/dhutil/dhutil.go b/helper/dhutil/dhutil.go index 97552d4cc4c69..a0ddde25bd43a 100644 --- a/helper/dhutil/dhutil.go +++ b/helper/dhutil/dhutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dhutil import ( diff --git a/helper/dhutil/dhutil_test.go b/helper/dhutil/dhutil_test.go index 4b94f601d92d6..46e90196d15e7 100644 --- a/helper/dhutil/dhutil_test.go +++ b/helper/dhutil/dhutil_test.go @@ -1,4 +1 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dhutil diff --git a/helper/experiments/experiments.go b/helper/experiments/experiments.go deleted file mode 100644 index 538430e64cccf..0000000000000 --- a/helper/experiments/experiments.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package experiments - -const VaultExperimentEventsAlpha1 = "events.alpha1" - -var validExperiments = []string{ - VaultExperimentEventsAlpha1, -} - -// ValidExperiments exposes the list without exposing a mutable global variable. 
-// Experiments can only be enabled when starting a server, and will typically -// enable pre-GA API functionality. -func ValidExperiments() []string { - result := make([]string, len(validExperiments)) - copy(result, validExperiments) - return result -} diff --git a/helper/fairshare/fairshare_testing_util.go b/helper/fairshare/fairshare_testing_util.go index 8061795947d82..1f65acd94d903 100644 --- a/helper/fairshare/fairshare_testing_util.go +++ b/helper/fairshare/fairshare_testing_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package fairshare import ( diff --git a/helper/fairshare/jobmanager.go b/helper/fairshare/jobmanager.go index dc9a6198af256..75c7662fc7656 100644 --- a/helper/fairshare/jobmanager.go +++ b/helper/fairshare/jobmanager.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package fairshare import ( diff --git a/helper/fairshare/jobmanager_test.go b/helper/fairshare/jobmanager_test.go index 3d6638a4a766f..d90314782294e 100644 --- a/helper/fairshare/jobmanager_test.go +++ b/helper/fairshare/jobmanager_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package fairshare import ( diff --git a/helper/fairshare/workerpool.go b/helper/fairshare/workerpool.go index e655a9084dd6c..f5179ba4e408c 100644 --- a/helper/fairshare/workerpool.go +++ b/helper/fairshare/workerpool.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package fairshare import ( diff --git a/helper/fairshare/workerpool_test.go b/helper/fairshare/workerpool_test.go index eb563140374b2..a3c3f68a1e3fe 100644 --- a/helper/fairshare/workerpool_test.go +++ b/helper/fairshare/workerpool_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package fairshare import ( diff --git a/helper/flag-kv/flag.go b/helper/flag-kv/flag.go index a3b04cec11ae6..06ae27111a8a1 100644 --- a/helper/flag-kv/flag.go +++ b/helper/flag-kv/flag.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package kvFlag import ( diff --git a/helper/flag-kv/flag_test.go b/helper/flag-kv/flag_test.go index b083d52e74e35..2fc88aa5f3ed7 100644 --- a/helper/flag-kv/flag_test.go +++ b/helper/flag-kv/flag_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package kvFlag import ( diff --git a/helper/flag-slice/flag.go b/helper/flag-slice/flag.go index b8234385ef03c..da75149dc488e 100644 --- a/helper/flag-slice/flag.go +++ b/helper/flag-slice/flag.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package sliceflag import "strings" diff --git a/helper/flag-slice/flag_test.go b/helper/flag-slice/flag_test.go index 7973d57926a21..f72e1d9605d66 100644 --- a/helper/flag-slice/flag_test.go +++ b/helper/flag-slice/flag_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package sliceflag import ( diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go index bf579d0382c2c..c610a3e3e4023 100644 --- a/helper/forwarding/types.pb.go +++ b/helper/forwarding/types.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: helper/forwarding/types.proto package forwarding diff --git a/helper/forwarding/types.proto b/helper/forwarding/types.proto index 7624257919dea..8f1376a180044 100644 --- a/helper/forwarding/types.proto +++ b/helper/forwarding/types.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; option go_package = "github.com/hashicorp/vault/helper/forwarding"; diff --git a/helper/forwarding/util.go b/helper/forwarding/util.go index 8361235432221..de92639afbee1 100644 --- a/helper/forwarding/util.go +++ b/helper/forwarding/util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package forwarding import ( diff --git a/helper/forwarding/util_test.go b/helper/forwarding/util_test.go index 192646a15f4d2..0af2b89e989b2 100644 --- a/helper/forwarding/util_test.go +++ b/helper/forwarding/util_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package forwarding import ( diff --git a/helper/hostutil/hostinfo.go b/helper/hostutil/hostinfo.go index 25c11e0b0753b..d35afb57d900a 100644 --- a/helper/hostutil/hostinfo.go +++ b/helper/hostutil/hostinfo.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !openbsd package hostutil diff --git a/helper/hostutil/hostinfo_error.go b/helper/hostutil/hostinfo_error.go index afbec28fa262d..ca5d8a2941c03 100644 --- a/helper/hostutil/hostinfo_error.go +++ b/helper/hostutil/hostinfo_error.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package hostutil import "fmt" diff --git a/helper/hostutil/hostinfo_openbsd.go b/helper/hostutil/hostinfo_openbsd.go index dbe1655e90dc8..8f01458afe1ae 100644 --- a/helper/hostutil/hostinfo_openbsd.go +++ b/helper/hostutil/hostinfo_openbsd.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build openbsd package hostutil diff --git a/helper/hostutil/hostinfo_test.go b/helper/hostutil/hostinfo_test.go index 0f53744adc1e3..c54893b17dbb9 100644 --- a/helper/hostutil/hostinfo_test.go +++ b/helper/hostutil/hostinfo_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package hostutil import ( diff --git a/helper/identity/identity.go b/helper/identity/identity.go index a7769f08e0186..9a28be7156791 100644 --- a/helper/identity/identity.go +++ b/helper/identity/identity.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package identity import ( diff --git a/helper/identity/mfa/mfa.go b/helper/identity/mfa/mfa.go index 1f8af4f4c20b2..d4bbf10b48463 100644 --- a/helper/identity/mfa/mfa.go +++ b/helper/identity/mfa/mfa.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mfa import ( diff --git a/helper/identity/mfa/sentinel.go b/helper/identity/mfa/sentinel.go index a587aa70b699c..f6d8c7b994224 100644 --- a/helper/identity/mfa/sentinel.go +++ b/helper/identity/mfa/sentinel.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mfa func (c *Config) SentinelGet(key string) (interface{}, error) { diff --git a/helper/identity/mfa/types.pb.go b/helper/identity/mfa/types.pb.go index 57dbab0da60c8..59d989e86e3a7 100644 --- a/helper/identity/mfa/types.pb.go +++ b/helper/identity/mfa/types.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: helper/identity/mfa/types.proto package mfa diff --git a/helper/identity/mfa/types.proto b/helper/identity/mfa/types.proto index 65eb853be2461..decade25b9af7 100644 --- a/helper/identity/mfa/types.proto +++ b/helper/identity/mfa/types.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; option go_package = "github.com/hashicorp/vault/helper/identity/mfa"; diff --git a/helper/identity/sentinel.go b/helper/identity/sentinel.go index 4f65e62c848de..2c2bc4b940f25 100644 --- a/helper/identity/sentinel.go +++ b/helper/identity/sentinel.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package identity import "github.com/golang/protobuf/ptypes" diff --git a/helper/identity/types.pb.go b/helper/identity/types.pb.go index 91b4c0ff20d78..7a88a74773104 100644 --- a/helper/identity/types.pb.go +++ b/helper/identity/types.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: helper/identity/types.proto package identity diff --git a/helper/identity/types.proto b/helper/identity/types.proto index a34d715acf340..0ea7525eb0f5b 100644 --- a/helper/identity/types.proto +++ b/helper/identity/types.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; option go_package = "github.com/hashicorp/vault/helper/identity"; diff --git a/helper/locking/lock.go b/helper/locking/lock.go deleted file mode 100644 index a9bff4c0f06c7..0000000000000 --- a/helper/locking/lock.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package locking - -import ( - "sync" - - "github.com/sasha-s/go-deadlock" -) - -// Common mutex interface to allow either built-in or imported deadlock use -type Mutex interface { - Lock() - Unlock() -} - -// Common r/w mutex interface to allow either built-in or imported deadlock use -type RWMutex interface { - Lock() - RLock() - RLocker() sync.Locker - RUnlock() - Unlock() -} - -// DeadlockMutex (used when requested via config option `detact_deadlocks`), -// behaves like a sync.Mutex but does periodic checking to see if outstanding -// locks and requests look like a deadlock. If it finds a deadlock candidate it -// will output it prefixed with "POTENTIAL DEADLOCK", as described at -// https://github.com/sasha-s/go-deadlock -type DeadlockMutex struct { - deadlock.Mutex -} - -// DeadlockRWMutex is the RW version of DeadlockMutex. -type DeadlockRWMutex struct { - deadlock.RWMutex -} - -// Regular sync/mutex. -type SyncMutex struct { - sync.Mutex -} - -// DeadlockRWMutex is the RW version of SyncMutex. -type SyncRWMutex struct { - sync.RWMutex -} diff --git a/helper/logging/logfile.go b/helper/logging/logfile.go index 9417e9ca8233d..b1fd46cbaff43 100644 --- a/helper/logging/logfile.go +++ b/helper/logging/logfile.go @@ -1,23 +1,12 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package logging import ( - "fmt" "os" "path/filepath" - "sort" - "strconv" "strings" "sync" - "time" - - "github.com/hashicorp/go-multierror" ) -var now = time.Now - type LogFile struct { // Name of the log file fileName string @@ -25,126 +14,43 @@ type LogFile struct { // Path to the log file logPath string - // duration between each file rotation operation - duration time.Duration - - // lastCreated represents the creation time of the latest log - lastCreated time.Time - // fileInfo is the pointer to the current file being written to fileInfo *os.File - // maxBytes is the maximum number of desired bytes for a log file - maxBytes int - - // bytesWritten is the number of bytes written in the current log file - bytesWritten int64 - - // Max rotated files to keep before removing them. - maxArchivedFiles int - // acquire is the mutex utilized to ensure we have no concurrency issues acquire sync.Mutex } +func NewLogFile(logPath string, fileName string) *LogFile { + return &LogFile{ + fileName: strings.TrimSpace(fileName), + logPath: strings.TrimSpace(logPath), + } +} + // Write is used to implement io.Writer func (l *LogFile) Write(b []byte) (n int, err error) { l.acquire.Lock() defer l.acquire.Unlock() - // Create a new file if we have no file to write to if l.fileInfo == nil { if err := l.openNew(); err != nil { return 0, err } - } else if err := l.rotate(); err != nil { // Check for the last contact and rotate if necessary - return 0, err - } - - bytesWritten, err := l.fileInfo.Write(b) - - if bytesWritten > 0 { - l.bytesWritten += int64(bytesWritten) } - return bytesWritten, err -} - -func (l *LogFile) fileNamePattern() string { - // Extract the file extension - fileExt := filepath.Ext(l.fileName) - // If we have no file extension we append .log - if fileExt == "" { - fileExt = ".log" - } - // Remove the file extension from the filename - return strings.TrimSuffix(l.fileName, fileExt) + "-%s" + fileExt + return 
l.fileInfo.Write(b) } func (l *LogFile) openNew() error { - fileNamePattern := l.fileNamePattern() + newFilePath := filepath.Join(l.logPath, l.fileName) - createTime := now() - newFileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10)) - newFilePath := filepath.Join(l.logPath, newFileName) - - // Try creating a file. We truncate the file because we are the only authority to write the logs - filePointer, err := os.OpenFile(newFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o640) + // Try to open an existing file or create a new one if it doesn't exist. + filePointer, err := os.OpenFile(newFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o640) if err != nil { return err } - // New file, new 'bytes' tracker, new creation time :) :) l.fileInfo = filePointer - l.lastCreated = createTime - l.bytesWritten = 0 return nil } - -func (l *LogFile) rotate() error { - // Get the time from the last point of contact - timeElapsed := time.Since(l.lastCreated) - // Rotate if we hit the byte file limit or the time limit - if (l.bytesWritten >= int64(l.maxBytes) && (l.maxBytes > 0)) || timeElapsed >= l.duration { - if err := l.fileInfo.Close(); err != nil { - return err - } - if err := l.pruneFiles(); err != nil { - return err - } - return l.openNew() - } - return nil -} - -func (l *LogFile) pruneFiles() error { - if l.maxArchivedFiles == 0 { - return nil - } - - pattern := filepath.Join(l.logPath, fmt.Sprintf(l.fileNamePattern(), "*")) - matches, err := filepath.Glob(pattern) - if err != nil { - return err - } - - switch { - case l.maxArchivedFiles < 0: - return removeFiles(matches) - case len(matches) < l.maxArchivedFiles: - return nil - } - - sort.Strings(matches) - last := len(matches) - l.maxArchivedFiles - return removeFiles(matches[:last]) -} - -func removeFiles(files []string) (err error) { - for _, file := range files { - if fileError := os.Remove(file); fileError != nil { - err = multierror.Append(err, fmt.Errorf("error removing file %s: %v", 
file, fileError)) - } - } - return err -} diff --git a/helper/logging/logfile_test.go b/helper/logging/logfile_test.go deleted file mode 100644 index a0cae986aadcb..0000000000000 --- a/helper/logging/logfile_test.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package logging - -import ( - "os" - "path/filepath" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestLogFile_openNew(t *testing.T) { - logFile := &LogFile{ - fileName: "vault.log", - logPath: t.TempDir(), - duration: defaultRotateDuration, - } - - err := logFile.openNew() - require.NoError(t, err) - - msg := "[INFO] Something" - _, err = logFile.Write([]byte(msg)) - require.NoError(t, err) - - content, err := os.ReadFile(logFile.fileInfo.Name()) - require.NoError(t, err) - require.Contains(t, string(content), msg) -} - -func TestLogFile_Rotation_MaxDuration(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - tempDir := t.TempDir() - logFile := LogFile{ - fileName: "vault.log", - logPath: tempDir, - duration: 50 * time.Millisecond, - } - - _, err := logFile.Write([]byte("Hello World")) - assert.NoError(t, err, "error writing rotation max duration part 1") - - time.Sleep(3 * logFile.duration) - - _, err = logFile.Write([]byte("Second File")) - assert.NoError(t, err, "error writing rotation max duration part 2") - - require.Len(t, listDir(t, tempDir), 2) -} - -func TestLogFile_Rotation_MaxBytes(t *testing.T) { - tempDir := t.TempDir() - logFile := LogFile{ - fileName: "somefile.log", - logPath: tempDir, - maxBytes: 10, - duration: defaultRotateDuration, - } - _, err := logFile.Write([]byte("Hello World")) - assert.NoError(t, err, "error writing rotation max bytes part 1") - - _, err = logFile.Write([]byte("Second File")) - assert.NoError(t, err, "error writing rotation max bytes part 2") - - require.Len(t, listDir(t, tempDir), 2) -} - -func 
TestLogFile_PruneFiles(t *testing.T) { - tempDir := t.TempDir() - logFile := LogFile{ - fileName: "vault.log", - logPath: tempDir, - maxBytes: 10, - duration: defaultRotateDuration, - maxArchivedFiles: 1, - } - _, err := logFile.Write([]byte("[INFO] Hello World")) - assert.NoError(t, err, "error writing during prune files test part 1") - - _, err = logFile.Write([]byte("[INFO] Second File")) - assert.NoError(t, err, "error writing during prune files test part 1") - - _, err = logFile.Write([]byte("[INFO] Third File")) - assert.NoError(t, err, "error writing during prune files test part 1") - - logFiles := listDir(t, tempDir) - sort.Strings(logFiles) - require.Len(t, logFiles, 2) - - content, err := os.ReadFile(filepath.Join(tempDir, logFiles[0])) - require.NoError(t, err) - require.Contains(t, string(content), "Second File") - - content, err = os.ReadFile(filepath.Join(tempDir, logFiles[1])) - require.NoError(t, err) - require.Contains(t, string(content), "Third File") -} - -func TestLogFile_PruneFiles_Disabled(t *testing.T) { - tempDir := t.TempDir() - logFile := LogFile{ - fileName: "somename.log", - logPath: tempDir, - maxBytes: 10, - duration: defaultRotateDuration, - maxArchivedFiles: 0, - } - - _, err := logFile.Write([]byte("[INFO] Hello World")) - assert.NoError(t, err, "error writing during prune files - disabled test part 1") - - _, err = logFile.Write([]byte("[INFO] Second File")) - assert.NoError(t, err, "error writing during prune files - disabled test part 2") - - _, err = logFile.Write([]byte("[INFO] Third File")) - assert.NoError(t, err, "error writing during prune files - disabled test part 3") - - require.Len(t, listDir(t, tempDir), 3) -} - -func TestLogFile_FileRotation_Disabled(t *testing.T) { - tempDir := t.TempDir() - logFile := LogFile{ - fileName: "vault.log", - logPath: tempDir, - maxBytes: 10, - maxArchivedFiles: -1, - } - - _, err := logFile.Write([]byte("[INFO] Hello World")) - assert.NoError(t, err, "error writing during rotation 
disabled test part 1") - - _, err = logFile.Write([]byte("[INFO] Second File")) - assert.NoError(t, err, "error writing during rotation disabled test part 2") - - _, err = logFile.Write([]byte("[INFO] Third File")) - assert.NoError(t, err, "error writing during rotation disabled test part 3") - - require.Len(t, listDir(t, tempDir), 1) -} - -func listDir(t *testing.T, name string) []string { - t.Helper() - fh, err := os.Open(name) - require.NoError(t, err) - files, err := fh.Readdirnames(100) - require.NoError(t, err) - return files -} diff --git a/helper/logging/logger.go b/helper/logging/logger.go index 1efac27bedfd2..6e87fde5387bd 100644 --- a/helper/logging/logger.go +++ b/helper/logging/logger.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logging import ( @@ -9,10 +6,8 @@ import ( "io" "path/filepath" "strings" - "time" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" ) const ( @@ -21,41 +16,27 @@ const ( JSONFormat ) -// defaultRotateDuration is the default time taken by the agent to rotate logs -const defaultRotateDuration = 24 * time.Hour - type LogFormat int // LogConfig should be used to supply configuration when creating a new Vault logger type LogConfig struct { - // Name is the name the returned logger will use to prefix log lines. - Name string - - // LogLevel is the minimum level to be logged. - LogLevel log.Level - - // LogFormat is the log format to use, supported formats are 'standard' and 'json'. - LogFormat LogFormat - - // LogFilePath is the path to write the logs to the user specified file. 
- LogFilePath string - - // LogRotateDuration is the user specified time to rotate logs - LogRotateDuration time.Duration - - // LogRotateBytes is the user specified byte limit to rotate logs - LogRotateBytes int - - // LogRotateMaxFiles is the maximum number of past archived log files to keep - LogRotateMaxFiles int + name string + logLevel log.Level + logFormat LogFormat + logFilePath string } -func (c *LogConfig) isLevelInvalid() bool { - return c.LogLevel == log.NoLevel || c.LogLevel == log.Off || c.LogLevel.String() == "unknown" +func NewLogConfig(name string, logLevel log.Level, logFormat LogFormat, logFilePath string) LogConfig { + return LogConfig{ + name: name, + logLevel: logLevel, + logFormat: logFormat, + logFilePath: strings.TrimSpace(logFilePath), + } } -func (c *LogConfig) isFormatJson() bool { - return c.LogFormat == JSONFormat +func (c LogConfig) IsFormatJson() bool { + return c.logFormat == JSONFormat } // Stringer implementation @@ -84,30 +65,11 @@ func (w noErrorWriter) Write(p []byte) (n int, err error) { return len(p), nil } -// parseFullPath takes a full path intended to be the location for log files and -// breaks it down into a directory and a file name. It checks both of these for -// the common globbing character '*' and returns an error if it is present. 
-func parseFullPath(fullPath string) (directory, fileName string, err error) { - directory, fileName = filepath.Split(fullPath) - - globChars := "*?[" - if strings.ContainsAny(directory, globChars) { - err = multierror.Append(err, fmt.Errorf("directory contains glob character")) - } - if fileName == "" { - fileName = "vault.log" - } else if strings.ContainsAny(fileName, globChars) { - err = multierror.Append(err, fmt.Errorf("file name contains globbing character")) - } - - return directory, fileName, err -} - // Setup creates a new logger with the specified configuration and writer -func Setup(config *LogConfig, w io.Writer) (log.InterceptLogger, error) { +func Setup(config LogConfig, w io.Writer) (log.InterceptLogger, error) { // Validate the log level - if config.isLevelInvalid() { - return nil, fmt.Errorf("invalid log level: %v", config.LogLevel) + if config.logLevel.String() == "unknown" { + return nil, fmt.Errorf("invalid log level: %v", config.logLevel) } // If out is os.Stdout and Vault is being run as a Windows Service, writes will @@ -115,41 +77,24 @@ func Setup(config *LogConfig, w io.Writer) (log.InterceptLogger, error) { // noErrorWriter is used as a wrapper to suppress any errors when writing to out. 
writers := []io.Writer{noErrorWriter{w: w}} - // Create a file logger if the user has specified the path to the log file - if config.LogFilePath != "" { - dir, fileName, err := parseFullPath(config.LogFilePath) - if err != nil { - return nil, err - } - - if config.LogRotateDuration == 0 { - config.LogRotateDuration = defaultRotateDuration - } - - logFile := &LogFile{ - fileName: fileName, - logPath: dir, - duration: config.LogRotateDuration, - maxBytes: config.LogRotateBytes, - maxArchivedFiles: config.LogRotateMaxFiles, - } - if err := logFile.pruneFiles(); err != nil { - return nil, fmt.Errorf("failed to prune log files: %w", err) + if config.logFilePath != "" { + dir, fileName := filepath.Split(config.logFilePath) + if fileName == "" { + fileName = "vault-agent.log" } + logFile := NewLogFile(dir, fileName) if err := logFile.openNew(); err != nil { - return nil, fmt.Errorf("failed to setup logging: %w", err) + return nil, fmt.Errorf("failed to set up file logging: %w", err) } writers = append(writers, logFile) } logger := log.NewInterceptLogger(&log.LoggerOptions{ - Name: config.Name, - Level: config.LogLevel, - IndependentLevels: true, - Output: io.MultiWriter(writers...), - JSONFormat: config.isFormatJson(), + Name: config.name, + Level: config.logLevel, + Output: io.MultiWriter(writers...), + JSONFormat: config.IsFormatJson(), }) - return logger, nil } @@ -193,12 +138,21 @@ func ParseLogLevel(logLevel string) (log.Level, error) { // TranslateLoggerLevel returns the string that corresponds with logging level of the hclog.Logger. 
func TranslateLoggerLevel(logger log.Logger) (string, error) { - logLevel := logger.GetLevel() - - switch logLevel { - case log.Trace, log.Debug, log.Info, log.Warn, log.Error: - return logLevel.String(), nil - default: + var result string + + if logger.IsTrace() { + result = "trace" + } else if logger.IsDebug() { + result = "debug" + } else if logger.IsInfo() { + result = "info" + } else if logger.IsWarn() { + result = "warn" + } else if logger.IsError() { + result = "error" + } else { return "", fmt.Errorf("unknown log level") } + + return result, nil } diff --git a/helper/logging/logger_test.go b/helper/logging/logger_test.go deleted file mode 100644 index 30ff1783a776f..0000000000000 --- a/helper/logging/logger_test.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package logging - -import ( - "bytes" - "encoding/json" - "errors" - "os" - "testing" - - log "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestLogger_SetupBasic(t *testing.T) { - cfg := &LogConfig{Name: "test-system", LogLevel: log.Info} - - logger, err := Setup(cfg, nil) - require.NoError(t, err) - require.NotNil(t, logger) - require.Equal(t, logger.Name(), "test-system") - require.True(t, logger.IsInfo()) -} - -func TestLogger_SetupInvalidLogLevel(t *testing.T) { - cfg := &LogConfig{} - - _, err := Setup(cfg, nil) - assert.Containsf(t, err.Error(), "invalid log level", "expected error %s", err) -} - -func TestLogger_SetupLoggerErrorLevel(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Error, - } - - var buf bytes.Buffer - - logger, err := Setup(cfg, &buf) - require.NoError(t, err) - require.NotNil(t, logger) - - logger.Error("test error msg") - logger.Info("test info msg") - - output := buf.String() - - require.Contains(t, output, "[ERROR] test error msg") - require.NotContains(t, output, "[INFO] test info msg") -} - -func TestLogger_SetupLoggerDebugLevel(t 
*testing.T) { - cfg := LogConfig{LogLevel: log.Debug} - var buf bytes.Buffer - - logger, err := Setup(&cfg, &buf) - require.NoError(t, err) - require.NotNil(t, logger) - - logger.Info("test info msg") - logger.Debug("test debug msg") - - output := buf.String() - - require.Contains(t, output, "[INFO] test info msg") - require.Contains(t, output, "[DEBUG] test debug msg") -} - -func TestLogger_SetupLoggerWithName(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Debug, - Name: "test-system", - } - var buf bytes.Buffer - - logger, err := Setup(cfg, &buf) - require.NoError(t, err) - require.NotNil(t, logger) - - logger.Warn("test warn msg") - - require.Contains(t, buf.String(), "[WARN] test-system: test warn msg") -} - -func TestLogger_SetupLoggerWithJSON(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Debug, - LogFormat: JSONFormat, - Name: "test-system", - } - var buf bytes.Buffer - - logger, err := Setup(cfg, &buf) - require.NoError(t, err) - require.NotNil(t, logger) - - logger.Warn("test warn msg") - - var jsonOutput map[string]string - err = json.Unmarshal(buf.Bytes(), &jsonOutput) - require.NoError(t, err) - require.Contains(t, jsonOutput, "@level") - require.Equal(t, jsonOutput["@level"], "warn") - require.Contains(t, jsonOutput, "@message") - require.Equal(t, jsonOutput["@message"], "test warn msg") -} - -func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) { - tmpDir := t.TempDir() - - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: tmpDir, //+ "/", - } - var buf bytes.Buffer - - logger, err := Setup(cfg, &buf) - require.NoError(t, err) - require.NotNil(t, logger) -} - -func TestLogger_SetupLoggerWithInValidLogPath(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: "nonexistentdir/", - } - var buf bytes.Buffer - - logger, err := Setup(cfg, &buf) - require.Error(t, err) - require.True(t, errors.Is(err, os.ErrNotExist)) - require.Nil(t, logger) -} - -func TestLogger_SetupLoggerWithInValidLogPathPermission(t 
*testing.T) { - tmpDir := "/tmp/" + t.Name() - - err := os.Mkdir(tmpDir, 0o000) - assert.NoError(t, err, "unexpected error testing with invalid log path permission") - defer os.RemoveAll(tmpDir) - - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: tmpDir + "/", - } - var buf bytes.Buffer - - logger, err := Setup(cfg, &buf) - require.Error(t, err) - require.True(t, errors.Is(err, os.ErrPermission)) - require.Nil(t, logger) -} - -func TestLogger_SetupLoggerWithInvalidLogFilePath(t *testing.T) { - cases := map[string]struct { - path string - message string - }{ - "file name *": { - path: "/this/isnt/ok/juan*.log", - message: "file name contains globbing character", - }, - "file name ?": { - path: "/this/isnt/ok/juan?.log", - message: "file name contains globbing character", - }, - "file name [": { - path: "/this/isnt/ok/[juan].log", - message: "file name contains globbing character", - }, - "directory path *": { - path: "/this/isnt/ok/*/qwerty.log", - message: "directory contains glob character", - }, - "directory path ?": { - path: "/this/isnt/ok/?/qwerty.log", - message: "directory contains glob character", - }, - "directory path [": { - path: "/this/isnt/ok/[foo]/qwerty.log", - message: "directory contains glob character", - }, - } - - for name, tc := range cases { - name := name - tc := tc - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: tc.path, - } - _, err := Setup(cfg, &bytes.Buffer{}) - assert.Error(t, err, "%s: expected error due to *", name) - assert.Contains(t, err.Error(), tc.message, "%s: error message does not match: %s", name, err.Error()) - } -} - -func TestLogger_ChangeLogLevels(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Debug, - Name: "test-system", - } - var buf bytes.Buffer - - logger, err := Setup(cfg, &buf) - require.NoError(t, err) - require.NotNil(t, logger) - - assert.Equal(t, log.Debug, logger.GetLevel()) - - // Create new named loggers from the base logger and change the levels - logger2 := logger.Named("test2") - 
logger3 := logger.Named("test3") - - logger2.SetLevel(log.Info) - logger3.SetLevel(log.Error) - - assert.Equal(t, log.Debug, logger.GetLevel()) - assert.Equal(t, log.Info, logger2.GetLevel()) - assert.Equal(t, log.Error, logger3.GetLevel()) -} diff --git a/helper/metricsutil/bucket.go b/helper/metricsutil/bucket.go index 0f602e22ad153..9cbb2cdc2dafd 100644 --- a/helper/metricsutil/bucket.go +++ b/helper/metricsutil/bucket.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package metricsutil import ( diff --git a/helper/metricsutil/bucket_test.go b/helper/metricsutil/bucket_test.go index 1179e4dbdcef7..f37584781627f 100644 --- a/helper/metricsutil/bucket_test.go +++ b/helper/metricsutil/bucket_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package metricsutil import ( diff --git a/helper/metricsutil/gauge_process.go b/helper/metricsutil/gauge_process.go index c6fcd56639a2a..0ad0e9d876cf9 100644 --- a/helper/metricsutil/gauge_process.go +++ b/helper/metricsutil/gauge_process.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package metricsutil import ( @@ -11,9 +8,24 @@ import ( "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/timeutil" ) +// This interface allows unit tests to substitute in a simulated clock. +type clock interface { + Now() time.Time + NewTicker(time.Duration) *time.Ticker +} + +type defaultClock struct{} + +func (_ defaultClock) Now() time.Time { + return time.Now() +} + +func (_ defaultClock) NewTicker(d time.Duration) *time.Ticker { + return time.NewTicker(d) +} + // GaugeLabelValues is one gauge in a set sharing a single key, that // are measured in a batch. 
type GaugeLabelValues struct { @@ -61,7 +73,7 @@ type GaugeCollectionProcess struct { maxGaugeCardinality int // time source - clock timeutil.Clock + clock clock } // NewGaugeCollectionProcess creates a new collection process for the callback @@ -86,7 +98,7 @@ func NewGaugeCollectionProcess( gaugeInterval, maxGaugeCardinality, logger, - timeutil.DefaultClock{}, + defaultClock{}, ) } @@ -109,7 +121,7 @@ func (m *ClusterMetricSink) NewGaugeCollectionProcess( m.GaugeInterval, m.MaxGaugeCardinality, logger, - timeutil.DefaultClock{}, + defaultClock{}, ) } @@ -122,7 +134,7 @@ func newGaugeCollectionProcessWithClock( gaugeInterval time.Duration, maxGaugeCardinality int, logger log.Logger, - clock timeutil.Clock, + clock clock, ) (*GaugeCollectionProcess, error) { process := &GaugeCollectionProcess{ stop: make(chan struct{}, 1), diff --git a/helper/metricsutil/gauge_process_test.go b/helper/metricsutil/gauge_process_test.go index efd74e707df80..9971714e04e30 100644 --- a/helper/metricsutil/gauge_process_test.go +++ b/helper/metricsutil/gauge_process_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package metricsutil import ( @@ -15,7 +12,6 @@ import ( "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/timeutil" ) // SimulatedTime maintains a virtual clock so the test isn't @@ -25,10 +21,9 @@ import ( type SimulatedTime struct { now time.Time tickerBarrier chan *SimulatedTicker - timeutil.DefaultClock } -var _ timeutil.Clock = &SimulatedTime{} +var _ clock = &SimulatedTime{} type SimulatedTicker struct { ticker *time.Ticker @@ -123,7 +118,7 @@ func TestGauge_Creation(t *testing.T) { t.Fatalf("Error creating collection process: %v", err) } - if _, ok := p.clock.(timeutil.DefaultClock); !ok { + if _, ok := p.clock.(defaultClock); !ok { t.Error("Default clock not installed.") } diff --git a/helper/metricsutil/metricsutil.go b/helper/metricsutil/metricsutil.go index cfc2e1109655f..de85c7e4628e9 100644 --- a/helper/metricsutil/metricsutil.go +++ b/helper/metricsutil/metricsutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package metricsutil import ( diff --git a/helper/metricsutil/metricsutil_test.go b/helper/metricsutil/metricsutil_test.go index f8f17fedb3611..1b817ddad192d 100644 --- a/helper/metricsutil/metricsutil_test.go +++ b/helper/metricsutil/metricsutil_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package metricsutil import ( diff --git a/helper/metricsutil/wrapped_metrics.go b/helper/metricsutil/wrapped_metrics.go index e3df058e11659..67deb3bee1cd1 100644 --- a/helper/metricsutil/wrapped_metrics.go +++ b/helper/metricsutil/wrapped_metrics.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package metricsutil import ( diff --git a/helper/metricsutil/wrapped_metrics_test.go b/helper/metricsutil/wrapped_metrics_test.go index b65809b46109a..c0fb2c386677d 100644 --- a/helper/metricsutil/wrapped_metrics_test.go +++ b/helper/metricsutil/wrapped_metrics_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package metricsutil import ( diff --git a/helper/monitor/monitor.go b/helper/monitor/monitor.go index 28ecf0eee5710..490e2fa08bbaf 100644 --- a/helper/monitor/monitor.go +++ b/helper/monitor/monitor.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package monitor import ( diff --git a/helper/monitor/monitor_test.go b/helper/monitor/monitor_test.go index 06e1e01777827..0133a351b99d5 100644 --- a/helper/monitor/monitor_test.go +++ b/helper/monitor/monitor_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package monitor import ( diff --git a/helper/namespace/namespace.go b/helper/namespace/namespace.go index 04a5b79dbec81..c1226a5547f9a 100644 --- a/helper/namespace/namespace.go +++ b/helper/namespace/namespace.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package namespace import ( diff --git a/helper/namespace/namespace_test.go b/helper/namespace/namespace_test.go index fd4c4c2f9988c..442b46b904470 100644 --- a/helper/namespace/namespace_test.go +++ b/helper/namespace/namespace_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package namespace import ( diff --git a/helper/osutil/fileinfo.go b/helper/osutil/fileinfo.go index f14db6b9c8bf1..59b99859ebae4 100644 --- a/helper/osutil/fileinfo.go +++ b/helper/osutil/fileinfo.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package osutil import ( diff --git a/helper/osutil/fileinfo_test.go b/helper/osutil/fileinfo_test.go index 8c3316bc91abd..febd11966a35b 100644 --- a/helper/osutil/fileinfo_test.go +++ b/helper/osutil/fileinfo_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package osutil import ( diff --git a/helper/osutil/fileinfo_unix.go b/helper/osutil/fileinfo_unix.go index bb60c498797de..c49a591ce9c66 100644 --- a/helper/osutil/fileinfo_unix.go +++ b/helper/osutil/fileinfo_unix.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !windows package osutil diff --git a/helper/osutil/fileinfo_unix_test.go b/helper/osutil/fileinfo_unix_test.go index 302bd9e16847b..c31ca5bdc3722 100644 --- a/helper/osutil/fileinfo_unix_test.go +++ b/helper/osutil/fileinfo_unix_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !windows package osutil diff --git a/helper/osutil/fileinfo_windows.go b/helper/osutil/fileinfo_windows.go index 193fe3ff8420c..0869c97e7d98a 100644 --- a/helper/osutil/fileinfo_windows.go +++ b/helper/osutil/fileinfo_windows.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build windows package osutil diff --git a/helper/parseip/parseip.go b/helper/parseip/parseip.go index 95579633b5092..414a3f05c81be 100644 --- a/helper/parseip/parseip.go +++ b/helper/parseip/parseip.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package parseip import ( diff --git a/helper/parseip/parseip_test.go b/helper/parseip/parseip_test.go index e26c810be677f..5d2b3645b853d 100644 --- a/helper/parseip/parseip_test.go +++ b/helper/parseip/parseip_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package parseip import ( diff --git a/helper/pgpkeys/encrypt_decrypt.go b/helper/pgpkeys/encrypt_decrypt.go index c7a8027cd2ce8..31678df69f4c1 100644 --- a/helper/pgpkeys/encrypt_decrypt.go +++ b/helper/pgpkeys/encrypt_decrypt.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pgpkeys import ( @@ -8,8 +5,8 @@ import ( "encoding/base64" "fmt" - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" ) // EncryptShares takes an ordered set of byte slices to encrypt and the diff --git a/helper/pgpkeys/flag.go b/helper/pgpkeys/flag.go index 79d114b4d9cc1..4490d891dc9ed 100644 --- a/helper/pgpkeys/flag.go +++ b/helper/pgpkeys/flag.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pgpkeys import ( @@ -11,7 +8,7 @@ import ( "os" "strings" - "github.com/ProtonMail/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp" ) // PubKeyFileFlag implements flag.Value and command.Example to receive exactly diff --git a/helper/pgpkeys/flag_test.go b/helper/pgpkeys/flag_test.go index 9ea25d44d5898..86e04611f1cfe 100644 --- a/helper/pgpkeys/flag_test.go +++ b/helper/pgpkeys/flag_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pgpkeys import ( @@ -15,8 +12,8 @@ import ( "strings" "testing" - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" ) func TestPubKeyFilesFlag_implements(t *testing.T) { diff --git a/helper/pgpkeys/keybase.go b/helper/pgpkeys/keybase.go index b24e4bf231c8a..a9dde2bdd7aed 100644 --- a/helper/pgpkeys/keybase.go +++ b/helper/pgpkeys/keybase.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pgpkeys import ( @@ -9,9 +6,9 @@ import ( "fmt" "strings" - "github.com/ProtonMail/go-crypto/openpgp" cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/keybase/go-crypto/openpgp" ) const ( diff --git a/helper/pgpkeys/keybase_test.go b/helper/pgpkeys/keybase_test.go index 7d59899fd9e53..c261e6f14c421 100644 --- a/helper/pgpkeys/keybase_test.go +++ b/helper/pgpkeys/keybase_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pgpkeys import ( @@ -10,8 +7,8 @@ import ( "reflect" "testing" - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" ) func TestFetchKeybasePubkeys(t *testing.T) { diff --git a/helper/pgpkeys/test_keys.go b/helper/pgpkeys/test_keys.go index be97698d1216e..c10a9055ed00e 100644 --- a/helper/pgpkeys/test_keys.go +++ b/helper/pgpkeys/test_keys.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pgpkeys const ( diff --git a/helper/policies/policies.go b/helper/policies/policies.go index 2a34602057980..729ce10b2fc65 100644 --- a/helper/policies/policies.go +++ b/helper/policies/policies.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package policies import "sort" diff --git a/helper/policies/policies_test.go b/helper/policies/policies_test.go index 6356dee18a043..ba9b0a8f70b05 100644 --- a/helper/policies/policies_test.go +++ b/helper/policies/policies_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package policies import "testing" diff --git a/helper/proxyutil/proxyutil.go b/helper/proxyutil/proxyutil.go index b0f06d6b9f260..fdb20973e003f 100644 --- a/helper/proxyutil/proxyutil.go +++ b/helper/proxyutil/proxyutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package proxyutil import ( diff --git a/helper/random/parser.go b/helper/random/parser.go index c5e82c8c1e5a1..3184db8aa5c62 100644 --- a/helper/random/parser.go +++ b/helper/random/parser.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/random/parser_test.go b/helper/random/parser_test.go index f8af5a5eb920b..59cdb81430438 100644 --- a/helper/random/parser_test.go +++ b/helper/random/parser_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/random/random_api.go b/helper/random/random_api.go index 5bb9316b15be5..9bc89f9af7e54 100644 --- a/helper/random/random_api.go +++ b/helper/random/random_api.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/random/registry.go b/helper/random/registry.go index 334df734e24ef..efdcf5c302f58 100644 --- a/helper/random/registry.go +++ b/helper/random/registry.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/random/registry_test.go b/helper/random/registry_test.go index 21297aaf21f59..3d7060650b4fd 100644 --- a/helper/random/registry_test.go +++ b/helper/random/registry_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/random/rules.go b/helper/random/rules.go index 05cc800c91e88..fead5b4ffe65d 100644 --- a/helper/random/rules.go +++ b/helper/random/rules.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/random/rules_test.go b/helper/random/rules_test.go index e85df503b0a13..18aa008798265 100644 --- a/helper/random/rules_test.go +++ b/helper/random/rules_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/random/serializing.go b/helper/random/serializing.go index 5b68d32758740..93371df028784 100644 --- a/helper/random/serializing.go +++ b/helper/random/serializing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/random/serializing_test.go b/helper/random/serializing_test.go index bfa17ae266e70..171053742993e 100644 --- a/helper/random/serializing_test.go +++ b/helper/random/serializing_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/random/string_generator.go b/helper/random/string_generator.go index 48f08ed8924af..96dd69ec60e3b 100644 --- a/helper/random/string_generator.go +++ b/helper/random/string_generator.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( @@ -70,7 +67,7 @@ func sortCharset(chars string) string { return string(r) } -// StringGenerator generates random strings from the provided charset & adhering to a set of rules. The set of rules +// StringGenerator generats random strings from the provided charset & adhering to a set of rules. The set of rules // are things like CharsetRule which requires a certain number of characters from a sub-charset. 
type StringGenerator struct { // Length of the string to generate. diff --git a/helper/random/string_generator_test.go b/helper/random/string_generator_test.go index 8307ff73a485c..af4e7da149626 100644 --- a/helper/random/string_generator_test.go +++ b/helper/random/string_generator_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package random import ( diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go index 2e69f3a27d7a3..4633d523bea53 100644 --- a/helper/storagepacker/storagepacker.go +++ b/helper/storagepacker/storagepacker.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package storagepacker import ( diff --git a/helper/storagepacker/storagepacker_test.go b/helper/storagepacker/storagepacker_test.go index ad76107afbf42..cc2448b2bc90a 100644 --- a/helper/storagepacker/storagepacker_test.go +++ b/helper/storagepacker/storagepacker_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package storagepacker import ( diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index 6dd58b96d37d6..6bed81c48a152 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: helper/storagepacker/types.proto package storagepacker diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index 7efb0a11ef983..4edfaf4f85721 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; option go_package = "github.com/hashicorp/vault/helper/storagepacker"; diff --git a/helper/testhelpers/azurite/azurite.go b/helper/testhelpers/azurite/azurite.go index a538e5c87eb03..13d65750d4cec 100644 --- a/helper/testhelpers/azurite/azurite.go +++ b/helper/testhelpers/azurite/azurite.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package azurite import ( @@ -10,7 +7,7 @@ import ( "testing" "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" ) type Config struct { diff --git a/helper/testhelpers/cassandra/cassandrahelper.go b/helper/testhelpers/cassandra/cassandrahelper.go index 8e970e836a9cc..9d8ceeea8c1f6 100644 --- a/helper/testhelpers/cassandra/cassandrahelper.go +++ b/helper/testhelpers/cassandra/cassandrahelper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cassandra import ( @@ -9,13 +6,11 @@ import ( "net" "os" "path/filepath" - "runtime" - "strings" "testing" "time" "github.com/gocql/gocql" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" ) type containerConfig struct { @@ -82,12 +77,6 @@ func (h Host) ConnectionURL() string { func PrepareTestContainer(t *testing.T, opts ...ContainerOpt) (Host, func()) { t.Helper() - - // Skipping on ARM, as this image can't run on ARM architecture - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as this image is not supported on ARM architectures") - } - if os.Getenv("CASSANDRA_HOSTS") != "" { host, port, err := net.SplitHostPort(os.Getenv("CASSANDRA_HOSTS")) if err != nil { @@ -101,10 +90,9 @@ func PrepareTestContainer(t *testing.T, opts ...ContainerOpt) (Host, func()) { } containerCfg := &containerConfig{ - imageName: "docker.mirror.hashicorp.services/library/cassandra", - containerName: 
"cassandra", - version: "3.11", - env: []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"}, + imageName: "cassandra", + version: "3.11", + env: []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"}, } for _, opt := range opts { diff --git a/helper/testhelpers/certhelpers/cert_helpers.go b/helper/testhelpers/certhelpers/cert_helpers.go index 42692d01f6a6c..b84bbf961e5a2 100644 --- a/helper/testhelpers/certhelpers/cert_helpers.go +++ b/helper/testhelpers/certhelpers/cert_helpers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package certhelpers import ( diff --git a/helper/testhelpers/consul/consulhelper.go b/helper/testhelpers/consul/consulhelper.go index e8c160b8f6aa5..e88ead2bfad56 100644 --- a/helper/testhelpers/consul/consulhelper.go +++ b/helper/testhelpers/consul/consulhelper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( @@ -11,7 +8,7 @@ import ( consulapi "github.com/hashicorp/consul/api" goversion "github.com/hashicorp/go-version" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" ) type Config struct { @@ -61,7 +58,7 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo if isEnterprise { version += "-ent" name = "consul-enterprise" - repo = "docker.mirror.hashicorp.services/hashicorp/consul-enterprise" + repo = "hashicorp/consul-enterprise" license, hasLicense := os.LookupEnv("CONSUL_LICENSE") envVars = append(envVars, "CONSUL_LICENSE="+license) diff --git a/helper/testhelpers/corehelpers/corehelpers.go b/helper/testhelpers/corehelpers/corehelpers.go deleted file mode 100644 index 846db21da17a9..0000000000000 --- a/helper/testhelpers/corehelpers/corehelpers.go +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -// Package corehelpers contains testhelpers that don't depend on package vault, -// and thus can be used within vault (as well as elsewhere.) -package corehelpers - -import ( - "bytes" - "context" - "crypto/sha256" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/plugins/database/mysql" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/go-testing-interface" -) - -// RetryUntil runs f until it returns a nil result or the timeout is reached. -// If a nil result hasn't been obtained by timeout, calls t.Fatal. -func RetryUntil(t testing.T, timeout time.Duration, f func() error) { - t.Helper() - deadline := time.Now().Add(timeout) - var err error - for time.Now().Before(deadline) { - if err = f(); err == nil { - return - } - time.Sleep(100 * time.Millisecond) - } - t.Fatalf("did not complete before deadline, err: %v", err) -} - -// MakeTestPluginDir creates a temporary directory suitable for holding plugins. -// This helper also resolves symlinks to make tests happy on OS X. 
-func MakeTestPluginDir(t testing.T) (string, func(t testing.T)) { - if t != nil { - t.Helper() - } - - dir, err := os.MkdirTemp("", "") - if err != nil { - if t == nil { - panic(err) - } - t.Fatal(err) - } - - // OSX tempdir are /var, but actually symlinked to /private/var - dir, err = filepath.EvalSymlinks(dir) - if err != nil { - if t == nil { - panic(err) - } - t.Fatal(err) - } - - return dir, func(t testing.T) { - if err := os.RemoveAll(dir); err != nil { - if t == nil { - panic(err) - } - t.Fatal(err) - } - } -} - -func NewMockBuiltinRegistry() *mockBuiltinRegistry { - return &mockBuiltinRegistry{ - forTesting: map[string]mockBackend{ - "mysql-database-plugin": {PluginType: consts.PluginTypeDatabase}, - "postgresql-database-plugin": {PluginType: consts.PluginTypeDatabase}, - "approle": {PluginType: consts.PluginTypeCredential}, - "pending-removal-test-plugin": { - PluginType: consts.PluginTypeCredential, - DeprecationStatus: consts.PendingRemoval, - }, - "aws": {PluginType: consts.PluginTypeCredential}, - "consul": {PluginType: consts.PluginTypeSecrets}, - }, - } -} - -type mockBackend struct { - consts.PluginType - consts.DeprecationStatus -} - -type mockBuiltinRegistry struct { - forTesting map[string]mockBackend -} - -func toFunc(f logical.Factory) func() (interface{}, error) { - return func() (interface{}, error) { - return f, nil - } -} - -func (m *mockBuiltinRegistry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) { - testBackend, ok := m.forTesting[name] - if !ok { - return nil, false - } - testPluginType := testBackend.PluginType - if pluginType != testPluginType { - return nil, false - } - - switch name { - case "approle", "pending-removal-test-plugin": - return toFunc(approle.Factory), true - case "aws": - return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - b := new(framework.Backend) - b.Setup(ctx, config) - b.BackendType = logical.TypeCredential - return b, nil - 
}), true - case "postgresql-database-plugin": - return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - b := new(framework.Backend) - b.Setup(ctx, config) - b.BackendType = logical.TypeLogical - return b, nil - }), true - case "mysql-database-plugin": - return mysql.New(mysql.DefaultUserNameTemplate), true - case "consul": - return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - b := new(framework.Backend) - b.Setup(ctx, config) - b.BackendType = logical.TypeLogical - return b, nil - }), true - default: - return nil, false - } -} - -// Keys only supports getting a realistic list of the keys for database plugins, -// and approle -func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string { - switch pluginType { - case consts.PluginTypeDatabase: - // This is a hard-coded reproduction of the db plugin keys in - // helper/builtinplugins/registry.go. The registry isn't directly used - // because it causes import cycles. 
- return []string{ - "mysql-database-plugin", - "mysql-aurora-database-plugin", - "mysql-rds-database-plugin", - "mysql-legacy-database-plugin", - - "cassandra-database-plugin", - "couchbase-database-plugin", - "elasticsearch-database-plugin", - "hana-database-plugin", - "influxdb-database-plugin", - "mongodb-database-plugin", - "mongodbatlas-database-plugin", - "mssql-database-plugin", - "postgresql-database-plugin", - "redis-elasticache-database-plugin", - "redshift-database-plugin", - "redis-database-plugin", - "snowflake-database-plugin", - } - case consts.PluginTypeCredential: - return []string{ - "pending-removal-test-plugin", - "approle", - } - } - return []string{} -} - -func (m *mockBuiltinRegistry) Contains(name string, pluginType consts.PluginType) bool { - for _, key := range m.Keys(pluginType) { - if key == name { - return true - } - } - return false -} - -func (m *mockBuiltinRegistry) DeprecationStatus(name string, pluginType consts.PluginType) (consts.DeprecationStatus, bool) { - if m.Contains(name, pluginType) { - return m.forTesting[name].DeprecationStatus, true - } - - return consts.Unknown, false -} - -func TestNoopAudit(t testing.T, config map[string]string) *NoopAudit { - n, err := NewNoopAudit(config) - if err != nil { - t.Fatal(err) - } - return n -} - -func NewNoopAudit(config map[string]string) (*NoopAudit, error) { - view := &logical.InmemStorage{} - err := view.Put(context.Background(), &logical.StorageEntry{ - Key: "salt", - Value: []byte("foo"), - }) - if err != nil { - return nil, err - } - - n := &NoopAudit{ - Config: &audit.BackendConfig{ - SaltView: view, - SaltConfig: &salt.Config{ - HMAC: sha256.New, - HMACType: "hmac-sha256", - }, - Config: config, - }, - } - n.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - SaltFunc: n.Salt, - } - return n, nil -} - -func NoopAuditFactory(records **[][]byte) audit.Factory { - return func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) { - n, err := 
NewNoopAudit(config.Config) - if err != nil { - return nil, err - } - if records != nil { - *records = &n.records - } - return n, nil - } -} - -type NoopAudit struct { - Config *audit.BackendConfig - ReqErr error - ReqAuth []*logical.Auth - Req []*logical.Request - ReqHeaders []map[string][]string - ReqNonHMACKeys []string - ReqErrs []error - - RespErr error - RespAuth []*logical.Auth - RespReq []*logical.Request - Resp []*logical.Response - RespNonHMACKeys [][]string - RespReqNonHMACKeys [][]string - RespErrs []error - - formatter audit.AuditFormatter - records [][]byte - l sync.RWMutex - salt *salt.Salt - saltMutex sync.RWMutex -} - -func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error { - n.l.Lock() - defer n.l.Unlock() - if n.formatter.AuditFormatWriter != nil { - var w bytes.Buffer - err := n.formatter.FormatRequest(ctx, &w, audit.FormatterConfig{}, in) - if err != nil { - return err - } - n.records = append(n.records, w.Bytes()) - } - - n.ReqAuth = append(n.ReqAuth, in.Auth) - n.Req = append(n.Req, in.Request) - n.ReqHeaders = append(n.ReqHeaders, in.Request.Headers) - n.ReqNonHMACKeys = in.NonHMACReqDataKeys - n.ReqErrs = append(n.ReqErrs, in.OuterErr) - - return n.ReqErr -} - -func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error { - n.l.Lock() - defer n.l.Unlock() - - if n.formatter.AuditFormatWriter != nil { - var w bytes.Buffer - err := n.formatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in) - if err != nil { - return err - } - n.records = append(n.records, w.Bytes()) - } - - n.RespAuth = append(n.RespAuth, in.Auth) - n.RespReq = append(n.RespReq, in.Request) - n.Resp = append(n.Resp, in.Response) - n.RespErrs = append(n.RespErrs, in.OuterErr) - - if in.Response != nil { - n.RespNonHMACKeys = append(n.RespNonHMACKeys, in.NonHMACRespDataKeys) - n.RespReqNonHMACKeys = append(n.RespReqNonHMACKeys, in.NonHMACReqDataKeys) - } - - return n.RespErr -} - -func (n *NoopAudit) LogTestMessage(ctx 
context.Context, in *logical.LogInput, config map[string]string) error { - n.l.Lock() - defer n.l.Unlock() - var w bytes.Buffer - tempFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) - err := tempFormatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in) - if err != nil { - return err - } - n.records = append(n.records, w.Bytes()) - return nil -} - -func (n *NoopAudit) Salt(ctx context.Context) (*salt.Salt, error) { - n.saltMutex.RLock() - if n.salt != nil { - defer n.saltMutex.RUnlock() - return n.salt, nil - } - n.saltMutex.RUnlock() - n.saltMutex.Lock() - defer n.saltMutex.Unlock() - if n.salt != nil { - return n.salt, nil - } - salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig) - if err != nil { - return nil, err - } - n.salt = salt - return salt, nil -} - -func (n *NoopAudit) GetHash(ctx context.Context, data string) (string, error) { - salt, err := n.Salt(ctx) - if err != nil { - return "", err - } - return salt.GetIdentifiedHMAC(data), nil -} - -func (n *NoopAudit) Reload(ctx context.Context) error { - return nil -} - -func (n *NoopAudit) Invalidate(ctx context.Context) { - n.saltMutex.Lock() - defer n.saltMutex.Unlock() - n.salt = nil -} - -type TestLogger struct { - hclog.Logger - Path string - File *os.File - sink hclog.SinkAdapter -} - -func NewTestLogger(t testing.T) *TestLogger { - var logFile *os.File - var logPath string - output := os.Stderr - - logDir := os.Getenv("VAULT_TEST_LOG_DIR") - if logDir != "" { - logPath = filepath.Join(logDir, t.Name()+".log") - // t.Name may include slashes. - dir, _ := filepath.Split(logPath) - err := os.MkdirAll(dir, 0o755) - if err != nil { - t.Fatal(err) - } - logFile, err = os.Create(logPath) - if err != nil { - t.Fatal(err) - } - output = logFile - } - - // We send nothing on the regular logger, that way we can later deregister - // the sink to stop logging during cluster cleanup. 
- logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ - Output: ioutil.Discard, - IndependentLevels: true, - }) - sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{ - Output: output, - Level: hclog.Trace, - IndependentLevels: true, - }) - logger.RegisterSink(sink) - return &TestLogger{ - Path: logPath, - File: logFile, - Logger: logger, - sink: sink, - } -} - -func (tl *TestLogger) StopLogging() { - tl.Logger.(hclog.InterceptLogger).DeregisterSink(tl.sink) -} diff --git a/helper/testhelpers/docker/testhelpers.go b/helper/testhelpers/docker/testhelpers.go new file mode 100644 index 0000000000000..88f377d573033 --- /dev/null +++ b/helper/testhelpers/docker/testhelpers.go @@ -0,0 +1,333 @@ +package docker + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/cenkalti/backoff/v3" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/archive" + "github.com/docker/go-connections/nat" + "github.com/hashicorp/go-uuid" +) + +type Runner struct { + DockerAPI *client.Client + RunOptions RunOptions +} + +type RunOptions struct { + ImageRepo string + ImageTag string + ContainerName string + Cmd []string + Entrypoint []string + Env []string + NetworkID string + CopyFromTo map[string]string + Ports []string + DoNotAutoRemove bool + AuthUsername string + AuthPassword string + LogConsumer func(string) +} + +func NewServiceRunner(opts RunOptions) (*Runner, error) { + dapi, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.39")) + if err != nil { + return nil, err + } + + if opts.NetworkID == "" { + opts.NetworkID = os.Getenv("TEST_DOCKER_NETWORK_ID") + } + if opts.ContainerName == "" { + if strings.Contains(opts.ImageRepo, "/") { + return nil, 
fmt.Errorf("ContainerName is required for non-library images") + } + // If there's no slash in the repo it's almost certainly going to be + // a good container name. + opts.ContainerName = opts.ImageRepo + } + return &Runner{ + DockerAPI: dapi, + RunOptions: opts, + }, nil +} + +type ServiceConfig interface { + Address() string + URL() *url.URL +} + +func NewServiceHostPort(host string, port int) *ServiceHostPort { + return &ServiceHostPort{address: fmt.Sprintf("%s:%d", host, port)} +} + +func NewServiceHostPortParse(s string) (*ServiceHostPort, error) { + pieces := strings.Split(s, ":") + if len(pieces) != 2 { + return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) + } + + port, err := strconv.Atoi(pieces[1]) + if err != nil || port < 1 { + return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) + } + + return &ServiceHostPort{s}, nil +} + +type ServiceHostPort struct { + address string +} + +func (s ServiceHostPort) Address() string { + return s.address +} + +func (s ServiceHostPort) URL() *url.URL { + return &url.URL{Host: s.address} +} + +func NewServiceURLParse(s string) (*ServiceURL, error) { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + return &ServiceURL{u: *u}, nil +} + +func NewServiceURL(u url.URL) *ServiceURL { + return &ServiceURL{u: u} +} + +type ServiceURL struct { + u url.URL +} + +func (s ServiceURL) Address() string { + return s.u.Host +} + +func (s ServiceURL) URL() *url.URL { + return &s.u +} + +// ServiceAdapter verifies connectivity to the service, then returns either the +// connection string (typically a URL) and nil, or empty string and an error. 
+type ServiceAdapter func(ctx context.Context, host string, port int) (ServiceConfig, error) + +func (d *Runner) StartService(ctx context.Context, connect ServiceAdapter) (*Service, error) { + container, hostIPs, err := d.Start(context.Background()) + if err != nil { + return nil, err + } + + cleanup := func() { + if d.RunOptions.LogConsumer != nil { + rc, err := d.DockerAPI.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, + }) + if err == nil { + b, err := ioutil.ReadAll(rc) + if err != nil { + d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs, err=%v, read: %s", err, string(b))) + } else { + d.RunOptions.LogConsumer(string(b)) + } + } + } + + for i := 0; i < 10; i++ { + err := d.DockerAPI.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + if err == nil { + return + } + time.Sleep(1 * time.Second) + } + } + + bo := backoff.NewExponentialBackOff() + bo.MaxInterval = time.Second * 5 + bo.MaxElapsedTime = 2 * time.Minute + + pieces := strings.Split(hostIPs[0], ":") + portInt, err := strconv.Atoi(pieces[1]) + if err != nil { + return nil, err + } + + var config ServiceConfig + err = backoff.Retry(func() error { + c, err := connect(ctx, pieces[0], portInt) + if err != nil { + return err + } + if c == nil { + return fmt.Errorf("service adapter returned nil error and config") + } + config = c + return nil + }, bo) + + if err != nil { + if !d.RunOptions.DoNotAutoRemove { + cleanup() + } + return nil, err + } + + return &Service{ + Config: config, + Cleanup: cleanup, + Container: container, + }, nil +} + +type Service struct { + Config ServiceConfig + Cleanup func() + Container *types.ContainerJSON +} + +func (d *Runner) Start(ctx context.Context) (*types.ContainerJSON, []string, error) { + suffix, err := uuid.GenerateUUID() + if err != nil { + return nil, nil, err + } + name := d.RunOptions.ContainerName + "-" + suffix + + cfg := 
&container.Config{ + Hostname: name, + Image: fmt.Sprintf("%s:%s", d.RunOptions.ImageRepo, d.RunOptions.ImageTag), + Env: d.RunOptions.Env, + Cmd: d.RunOptions.Cmd, + } + if len(d.RunOptions.Ports) > 0 { + cfg.ExposedPorts = make(map[nat.Port]struct{}) + for _, p := range d.RunOptions.Ports { + cfg.ExposedPorts[nat.Port(p)] = struct{}{} + } + } + if len(d.RunOptions.Entrypoint) > 0 { + cfg.Entrypoint = strslice.StrSlice(d.RunOptions.Entrypoint) + } + + hostConfig := &container.HostConfig{ + AutoRemove: !d.RunOptions.DoNotAutoRemove, + PublishAllPorts: true, + } + + netConfig := &network.NetworkingConfig{} + if d.RunOptions.NetworkID != "" { + netConfig.EndpointsConfig = map[string]*network.EndpointSettings{ + d.RunOptions.NetworkID: {}, + } + } + + // best-effort pull + var opts types.ImageCreateOptions + if d.RunOptions.AuthUsername != "" && d.RunOptions.AuthPassword != "" { + var buf bytes.Buffer + auth := map[string]string{ + "username": d.RunOptions.AuthUsername, + "password": d.RunOptions.AuthPassword, + } + if err := json.NewEncoder(&buf).Encode(auth); err != nil { + return nil, nil, err + } + opts.RegistryAuth = base64.URLEncoding.EncodeToString(buf.Bytes()) + } + resp, _ := d.DockerAPI.ImageCreate(ctx, cfg.Image, opts) + if resp != nil { + _, _ = ioutil.ReadAll(resp) + } + + c, err := d.DockerAPI.ContainerCreate(ctx, cfg, hostConfig, netConfig, nil, cfg.Hostname) + if err != nil { + return nil, nil, fmt.Errorf("container create failed: %v", err) + } + + for from, to := range d.RunOptions.CopyFromTo { + if err := copyToContainer(ctx, d.DockerAPI, c.ID, from, to); err != nil { + _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) + return nil, nil, err + } + } + + err = d.DockerAPI.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}) + if err != nil { + _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) + return nil, nil, fmt.Errorf("container start failed: %v", err) + } + + inspect, err := 
d.DockerAPI.ContainerInspect(ctx, c.ID) + if err != nil { + _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) + return nil, nil, err + } + + var addrs []string + for _, port := range d.RunOptions.Ports { + pieces := strings.Split(port, "/") + if len(pieces) < 2 { + return nil, nil, fmt.Errorf("expected port of the form 1234/tcp, got: %s", port) + } + if d.RunOptions.NetworkID != "" { + addrs = append(addrs, fmt.Sprintf("%s:%s", cfg.Hostname, pieces[0])) + } else { + mapped, ok := inspect.NetworkSettings.Ports[nat.Port(port)] + if !ok || len(mapped) == 0 { + return nil, nil, fmt.Errorf("no port mapping found for %s", port) + } + + addrs = append(addrs, fmt.Sprintf("127.0.0.1:%s", mapped[0].HostPort)) + } + } + + return &inspect, addrs, nil +} + +func copyToContainer(ctx context.Context, dapi *client.Client, containerID, from, to string) error { + srcInfo, err := archive.CopyInfoSourcePath(from, false) + if err != nil { + return fmt.Errorf("error copying from source %q: %v", from, err) + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return fmt.Errorf("error creating tar from source %q: %v", from, err) + } + defer srcArchive.Close() + + dstInfo := archive.CopyInfo{Path: to} + + dstDir, content, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return fmt.Errorf("error preparing copy from %q -> %q: %v", from, to, err) + } + defer content.Close() + err = dapi.CopyToContainer(ctx, containerID, dstDir, content, types.CopyToContainerOptions{}) + if err != nil { + return fmt.Errorf("error copying from %q -> %q: %v", from, to, err) + } + + return nil +} diff --git a/helper/testhelpers/etcd/etcdhelper.go b/helper/testhelpers/etcd/etcdhelper.go index a5b2578439c98..1051dd1405a44 100644 --- a/helper/testhelpers/etcd/etcdhelper.go +++ b/helper/testhelpers/etcd/etcdhelper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package etcd import ( @@ -11,7 +8,7 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" clientv3 "go.etcd.io/etcd/client/v3" ) diff --git a/helper/testhelpers/fakegcsserver/fake-gcs-server.go b/helper/testhelpers/fakegcsserver/fake-gcs-server.go index 503824e99e2df..59c9001adc292 100644 --- a/helper/testhelpers/fakegcsserver/fake-gcs-server.go +++ b/helper/testhelpers/fakegcsserver/fake-gcs-server.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package fakegcsserver import ( @@ -12,7 +9,7 @@ import ( "testing" "cloud.google.com/go/storage" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" "google.golang.org/api/iterator" "google.golang.org/api/option" ) @@ -30,7 +27,7 @@ func PrepareTestContainer(t *testing.T, version string) (func(), docker.ServiceC } runner, err := docker.NewServiceRunner(docker.RunOptions{ ContainerName: "fake-gcs-server", - ImageRepo: "docker.mirror.hashicorp.services/fsouza/fake-gcs-server", + ImageRepo: "fsouza/fake-gcs-server", ImageTag: version, Cmd: []string{"-scheme", "http", "-public-host", "storage.gcs.127.0.0.1.nip.io:4443"}, Ports: []string{"4443/tcp"}, diff --git a/helper/testhelpers/ldap/ldaphelper.go b/helper/testhelpers/ldap/ldaphelper.go index 7eebc134d5815..2bfb487c49ba9 100644 --- a/helper/testhelpers/ldap/ldaphelper.go +++ b/helper/testhelpers/ldap/ldaphelper.go @@ -1,30 +1,20 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ldap import ( "context" "fmt" - "runtime" - "strings" "testing" hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" "github.com/hashicorp/vault/sdk/helper/ldaputil" ) func PrepareTestContainer(t *testing.T, version string) (cleanup func(), cfg *ldaputil.ConfigEntry) { - // Skipping on ARM, as this image can't run on ARM architecture - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as this image is not supported on ARM architectures") - } - runner, err := docker.NewServiceRunner(docker.RunOptions{ // Currently set to "michelvocks" until https://github.com/rroemhild/docker-test-openldap/pull/14 // has been merged. - ImageRepo: "docker.mirror.hashicorp.services/michelvocks/docker-test-openldap", + ImageRepo: "michelvocks/docker-test-openldap", ImageTag: version, ContainerName: "ldap", Ports: []string{"389/tcp"}, diff --git a/helper/testhelpers/logical/testing.go b/helper/testhelpers/logical/testing.go index f634be2f39b84..4740ff6be370a 100644 --- a/helper/testhelpers/logical/testing.go +++ b/helper/testhelpers/logical/testing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package testing import ( @@ -12,11 +9,11 @@ import ( "sort" "testing" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -169,7 +166,7 @@ func Test(tt TestT, c TestCase) { config := &vault.CoreConfig{ Physical: phys, DisableMlock: true, - BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + BuiltinRegistry: vault.NewMockBuiltinRegistry(), } if c.LogicalBackend != nil || c.LogicalFactory != nil { diff --git a/helper/testhelpers/logical/testing_test.go b/helper/testhelpers/logical/testing_test.go index 9f2d74b72f10b..5a4096bfc74b5 100644 --- a/helper/testhelpers/logical/testing_test.go +++ b/helper/testhelpers/logical/testing_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package testing import ( diff --git a/helper/testhelpers/minimal/minimal.go b/helper/testhelpers/minimal/minimal.go deleted file mode 100644 index a13ddf0c01411..0000000000000 --- a/helper/testhelpers/minimal/minimal.go +++ /dev/null @@ -1,81 +0,0 @@ -package minimal - -import ( - "github.com/hashicorp/go-hclog" - logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" - "github.com/hashicorp/vault/audit" - auditFile "github.com/hashicorp/vault/builtin/audit/file" - auditSocket "github.com/hashicorp/vault/builtin/audit/socket" - auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog" - logicalDb "github.com/hashicorp/vault/builtin/logical/database" - "github.com/hashicorp/vault/builtin/plugin" - "github.com/hashicorp/vault/helper/builtinplugins" - "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/physical/inmem" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/copystructure" - "github.com/mitchellh/go-testing-interface" -) - -// NewTestSoloCluster is a simpler version of NewTestCluster that only creates -// single-node clusters. It is intentionally minimalist, if you need something -// from vault.TestClusterOptions, use NewTestCluster instead. It should work fine -// with a nil config argument. There is no need to call Start or Cleanup or -// TestWaitActive on the resulting cluster. 
-func NewTestSoloCluster(t testing.T, config *vault.CoreConfig) *vault.TestCluster { - logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name()) - - mycfg := &vault.CoreConfig{} - - if config != nil { - // It's rude to modify an input argument as a side-effect - copy, err := copystructure.Copy(config) - if err != nil { - t.Fatal(err) - } - mycfg = copy.(*vault.CoreConfig) - } - if mycfg.Physical == nil { - // Don't use NewTransactionalInmem because that would enable replication, - // which we don't care about in our case (use NewTestCluster for that.) - inm, err := inmem.NewInmem(nil, logger) - if err != nil { - t.Fatal(err) - } - mycfg.Physical = inm - } - if mycfg.CredentialBackends == nil { - mycfg.CredentialBackends = map[string]logical.Factory{ - "plugin": plugin.Factory, - } - } - if mycfg.LogicalBackends == nil { - mycfg.LogicalBackends = map[string]logical.Factory{ - "plugin": plugin.Factory, - "database": logicalDb.Factory, - // This is also available in the plugin catalog, but is here due to the need to - // automatically mount it. - "kv": logicalKv.Factory, - } - } - if mycfg.AuditBackends == nil { - mycfg.AuditBackends = map[string]audit.Factory{ - "file": auditFile.Factory, - "socket": auditSocket.Factory, - "syslog": auditSyslog.Factory, - } - } - if mycfg.BuiltinRegistry == nil { - mycfg.BuiltinRegistry = builtinplugins.Registry - } - - cluster := vault.NewTestCluster(t, mycfg, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: http.Handler, - Logger: logger, - }) - t.Cleanup(cluster.Cleanup) - return cluster -} diff --git a/helper/testhelpers/minio/miniohelper.go b/helper/testhelpers/minio/miniohelper.go index 5550a12106a36..c537585059931 100644 --- a/helper/testhelpers/minio/miniohelper.go +++ b/helper/testhelpers/minio/miniohelper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package minio import ( @@ -14,7 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" ) type Config struct { @@ -35,7 +32,7 @@ func PrepareTestContainer(t *testing.T, version string) (func(), *Config) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ ContainerName: "minio", - ImageRepo: "docker.mirror.hashicorp.services/minio/minio", + ImageRepo: "minio/minio", ImageTag: version, Env: []string{ "MINIO_ACCESS_KEY=" + accessKeyID, diff --git a/helper/testhelpers/mongodb/mongodbhelper.go b/helper/testhelpers/mongodb/mongodbhelper.go index 1f7afe30c3139..3dff8484b343c 100644 --- a/helper/testhelpers/mongodb/mongodbhelper.go +++ b/helper/testhelpers/mongodb/mongodbhelper.go @@ -1,19 +1,20 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mongodb import ( "context" + "crypto/tls" + "errors" "fmt" + "net" + "net/url" "os" + "strconv" + "strings" "testing" "time" - "github.com/hashicorp/vault/sdk/helper/docker" - "go.mongodb.org/mongo-driver/mongo" - "go.mongodb.org/mongo-driver/mongo/options" - "go.mongodb.org/mongo-driver/mongo/readpref" + "github.com/hashicorp/vault/helper/testhelpers/docker" + "gopkg.in/mgo.v2" ) // PrepareTestContainer calls PrepareTestContainerWithDatabase without a @@ -30,10 +31,9 @@ func PrepareTestContainerWithDatabase(t *testing.T, version, dbName string) (fun } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ContainerName: "mongo", - ImageRepo: "docker.mirror.hashicorp.services/library/mongo", - ImageTag: version, - Ports: []string{"27017/tcp"}, + ImageRepo: "mongo", + ImageTag: version, + Ports: []string{"27017/tcp"}, }) if err != nil { t.Fatalf("could not start docker mongo: %s", err) @@ -44,16 +44,22 @@ func PrepareTestContainerWithDatabase(t *testing.T, 
version, dbName string) (fun if dbName != "" { connURL = fmt.Sprintf("%s/%s", connURL, dbName) } + dialInfo, err := ParseMongoURL(connURL) + if err != nil { + return nil, err + } - ctx, _ = context.WithTimeout(context.Background(), 1*time.Minute) - client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL)) + session, err := mgo.DialWithInfo(dialInfo) if err != nil { return nil, err } + defer session.Close() - err = client.Ping(ctx, readpref.Primary()) - if err = client.Disconnect(ctx); err != nil { - t.Fatal() + session.SetSyncTimeout(1 * time.Minute) + session.SetSocketTimeout(1 * time.Minute) + err = session.Ping() + if err != nil { + return nil, err } return docker.NewServiceURLParse(connURL) @@ -64,3 +70,72 @@ func PrepareTestContainerWithDatabase(t *testing.T, version, dbName string) (fun return svc.Cleanup, svc.Config.URL().String() } + +// ParseMongoURL will parse a connection string and return a configured dialer +func ParseMongoURL(rawURL string) (*mgo.DialInfo, error) { + url, err := url.Parse(rawURL) + if err != nil { + return nil, err + } + + info := mgo.DialInfo{ + Addrs: strings.Split(url.Host, ","), + Database: strings.TrimPrefix(url.Path, "/"), + Timeout: 10 * time.Second, + } + + if url.User != nil { + info.Username = url.User.Username() + info.Password, _ = url.User.Password() + } + + query := url.Query() + for key, values := range query { + var value string + if len(values) > 0 { + value = values[0] + } + + switch key { + case "authSource": + info.Source = value + case "authMechanism": + info.Mechanism = value + case "gssapiServiceName": + info.Service = value + case "replicaSet": + info.ReplicaSetName = value + case "maxPoolSize": + poolLimit, err := strconv.Atoi(value) + if err != nil { + return nil, errors.New("bad value for maxPoolSize: " + value) + } + info.PoolLimit = poolLimit + case "ssl": + // Unfortunately, mgo doesn't support the ssl parameter in its MongoDB URI parsing logic, so we have to handle that + // ourselves. 
See https://github.com/go-mgo/mgo/issues/84 + ssl, err := strconv.ParseBool(value) + if err != nil { + return nil, errors.New("bad value for ssl: " + value) + } + if ssl { + info.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) { + return tls.Dial("tcp", addr.String(), &tls.Config{}) + } + } + case "connect": + if value == "direct" { + info.Direct = true + break + } + if value == "replicaSet" { + break + } + fallthrough + default: + return nil, errors.New("unsupported connection URL option: " + key + "=" + value) + } + } + + return &info, nil +} diff --git a/helper/testhelpers/mssql/mssqlhelper.go b/helper/testhelpers/mssql/mssqlhelper.go index 908e0277cc2d5..01bfd54b53872 100644 --- a/helper/testhelpers/mssql/mssqlhelper.go +++ b/helper/testhelpers/mssql/mssqlhelper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mssqlhelper import ( @@ -9,11 +6,9 @@ import ( "fmt" "net/url" "os" - "runtime" - "strings" "testing" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" ) const mssqlPassword = "yourStrong(!)Password" @@ -24,10 +19,6 @@ const mssqlPassword = "yourStrong(!)Password" const numRetries = 3 func PrepareMSSQLTestContainer(t *testing.T) (cleanup func(), retURL string) { - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as this image is not supported on ARM architectures") - } - if os.Getenv("MSSQL_URL") != "" { return func() {}, os.Getenv("MSSQL_URL") } diff --git a/helper/testhelpers/mysql/mysqlhelper.go b/helper/testhelpers/mysql/mysqlhelper.go index 19887a93e2ce9..145c91e253c1e 100644 --- a/helper/testhelpers/mysql/mysqlhelper.go +++ b/helper/testhelpers/mysql/mysqlhelper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package mysqlhelper import ( @@ -8,11 +5,10 @@ import ( "database/sql" "fmt" "os" - "runtime" "strings" "testing" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" ) type Config struct { @@ -27,23 +23,16 @@ func PrepareTestContainer(t *testing.T, legacy bool, pw string) (func(), string) return func() {}, os.Getenv("MYSQL_URL") } - // ARM64 is only supported on MySQL 8.0 and above. If we update - // our image and support to 8.0, we can unskip these tests. - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as MySQL 5.7 is not supported on ARM architectures") - } - imageVersion := "5.7" if legacy { imageVersion = "5.6" } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ContainerName: "mysql", - ImageRepo: "docker.mirror.hashicorp.services/library/mysql", - ImageTag: imageVersion, - Ports: []string{"3306/tcp"}, - Env: []string{"MYSQL_ROOT_PASSWORD=" + pw}, + ImageRepo: "mysql", + ImageTag: imageVersion, + Ports: []string{"3306/tcp"}, + Env: []string{"MYSQL_ROOT_PASSWORD=" + pw}, }) if err != nil { t.Fatalf("could not start docker mysql: %s", err) diff --git a/helper/testhelpers/pluginhelpers/pluginhelpers.go b/helper/testhelpers/pluginhelpers/pluginhelpers.go deleted file mode 100644 index e9a0067044927..0000000000000 --- a/helper/testhelpers/pluginhelpers/pluginhelpers.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Package pluginhelpers contains testhelpers that don't depend on package -// vault, and thus can be used within vault (as well as elsewhere.) 
-package pluginhelpers - -import ( - "crypto/sha256" - "fmt" - "os" - "os/exec" - "path" - "path/filepath" - "sync" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/go-testing-interface" -) - -var ( - testPluginCacheLock sync.Mutex - testPluginCache = map[string][]byte{} -) - -type TestPlugin struct { - Name string - Typ consts.PluginType - Version string - FileName string - Sha256 string -} - -func GetPlugin(t testing.T, typ consts.PluginType) (string, string, string, string) { - t.Helper() - var pluginName string - var pluginType string - var pluginMain string - var pluginVersionLocation string - - switch typ { - case consts.PluginTypeCredential: - pluginType = "approle" - pluginName = "vault-plugin-auth-" + pluginType - pluginMain = filepath.Join("builtin", "credential", pluginType, "cmd", pluginType, "main.go") - pluginVersionLocation = fmt.Sprintf("github.com/hashicorp/vault/builtin/credential/%s.ReportedVersion", pluginType) - case consts.PluginTypeSecrets: - pluginType = "consul" - pluginName = "vault-plugin-secrets-" + pluginType - pluginMain = filepath.Join("builtin", "logical", pluginType, "cmd", pluginType, "main.go") - pluginVersionLocation = fmt.Sprintf("github.com/hashicorp/vault/builtin/logical/%s.ReportedVersion", pluginType) - case consts.PluginTypeDatabase: - pluginType = "postgresql" - pluginName = "vault-plugin-database-" + pluginType - pluginMain = filepath.Join("plugins", "database", pluginType, fmt.Sprintf("%s-database-plugin", pluginType), "main.go") - pluginVersionLocation = fmt.Sprintf("github.com/hashicorp/vault/plugins/database/%s.ReportedVersion", pluginType) - default: - t.Fatal(typ.String()) - } - return pluginName, pluginType, pluginMain, pluginVersionLocation -} - -// to mount a plugin, we need a working binary plugin, so we compile one here. 
-// pluginVersion is used to override the plugin's self-reported version -func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, pluginDir string) TestPlugin { - t.Helper() - - pluginName, pluginType, pluginMain, pluginVersionLocation := GetPlugin(t, typ) - - testPluginCacheLock.Lock() - defer testPluginCacheLock.Unlock() - - var pluginBytes []byte - - dir := "" - var err error - pluginRootDir := "builtin" - if typ == consts.PluginTypeDatabase { - pluginRootDir = "plugins" - } - for { - dir, err = os.Getwd() - if err != nil { - t.Fatal(err) - } - // detect if we are in a subdirectory or the root directory and compensate - if _, err := os.Stat(pluginRootDir); os.IsNotExist(err) { - err := os.Chdir("..") - if err != nil { - t.Fatal(err) - } - } else { - break - } - } - - pluginPath := path.Join(pluginDir, pluginName) - if pluginVersion != "" { - pluginPath += "-" + pluginVersion - } - - key := fmt.Sprintf("%s %s %s", pluginName, pluginType, pluginVersion) - // cache the compilation to only run once - var ok bool - pluginBytes, ok = testPluginCache[key] - if !ok { - // we need to compile - line := []string{"build"} - if pluginVersion != "" { - line = append(line, "-ldflags", fmt.Sprintf("-X %s=%s", pluginVersionLocation, pluginVersion)) - } - line = append(line, "-o", pluginPath, pluginMain) - cmd := exec.Command("go", line...) 
- cmd.Dir = dir - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatal(fmt.Errorf("error running go build %v output: %s", err, output)) - } - testPluginCache[key], err = os.ReadFile(pluginPath) - if err != nil { - t.Fatal(err) - } - pluginBytes = testPluginCache[key] - } - - // write the cached plugin if necessary - if _, err := os.Stat(pluginPath); os.IsNotExist(err) { - err = os.WriteFile(pluginPath, pluginBytes, 0o755) - } - if err != nil { - t.Fatal(err) - } - - sha := sha256.New() - _, err = sha.Write(pluginBytes) - if err != nil { - t.Fatal(err) - } - return TestPlugin{ - Name: pluginName, - Typ: typ, - Version: pluginVersion, - FileName: path.Base(pluginPath), - Sha256: fmt.Sprintf("%x", sha.Sum(nil)), - } -} diff --git a/helper/testhelpers/postgresql/postgresqlhelper.go b/helper/testhelpers/postgresql/postgresqlhelper.go index 4f90177cc2cbd..2f9b2c2a1472c 100644 --- a/helper/testhelpers/postgresql/postgresqlhelper.go +++ b/helper/testhelpers/postgresql/postgresqlhelper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package postgresql import ( @@ -11,98 +8,48 @@ import ( "os" "testing" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" ) func PrepareTestContainer(t *testing.T, version string) (func(), string) { - env := []string{ - "POSTGRES_PASSWORD=secret", - "POSTGRES_DB=database", - } - - _, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env) - - return cleanup, url -} - -// PrepareTestContainerWithVaultUser will setup a test container with a Vault -// admin user configured so that we can safely call rotate-root without -// rotating the root DB credentials -func PrepareTestContainerWithVaultUser(t *testing.T, ctx context.Context, version string) (func(), string) { - env := []string{ - "POSTGRES_PASSWORD=secret", - "POSTGRES_DB=database", - } - - runner, cleanup, url, id := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env) - - cmd := []string{"psql", "-U", "postgres", "-c", "CREATE USER vaultadmin WITH LOGIN PASSWORD 'vaultpass' SUPERUSER"} - _, err := runner.RunCmdInBackground(ctx, id, cmd) - if err != nil { - t.Fatalf("Could not run command (%v) in container: %v", cmd, err) - } - - return cleanup, url + return prepareTestContainer(t, version, "secret", "database") } func PrepareTestContainerWithPassword(t *testing.T, version, password string) (func(), string) { - env := []string{ - "POSTGRES_PASSWORD=" + password, - "POSTGRES_DB=database", - } - - _, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, password, true, false, false, env) - - return cleanup, url -} - -func PrepareTestContainerRepmgr(t *testing.T, name, version string, envVars []string) (*docker.Runner, func(), string, string) { - env := append(envVars, - 
"REPMGR_PARTNER_NODES=psql-repl-node-0,psql-repl-node-1", - "REPMGR_PRIMARY_HOST=psql-repl-node-0", - "REPMGR_PASSWORD=repmgrpass", - "POSTGRESQL_PASSWORD=secret") - - return prepareTestContainer(t, name, "docker.mirror.hashicorp.services/bitnami/postgresql-repmgr", version, "secret", false, true, true, env) + return prepareTestContainer(t, version, password, "database") } -func prepareTestContainer(t *testing.T, name, repo, version, password string, - addSuffix, forceLocalAddr, doNotAutoRemove bool, envVars []string, -) (*docker.Runner, func(), string, string) { +func prepareTestContainer(t *testing.T, version, password, db string) (func(), string) { if os.Getenv("PG_URL") != "" { - return nil, func() {}, "", os.Getenv("PG_URL") + return func() {}, os.Getenv("PG_URL") } if version == "" { version = "11" } - runOpts := docker.RunOptions{ - ContainerName: name, - ImageRepo: repo, - ImageTag: version, - Env: envVars, - Ports: []string{"5432/tcp"}, - DoNotAutoRemove: doNotAutoRemove, - } - if repo == "bitnami/postgresql-repmgr" { - runOpts.NetworkID = os.Getenv("POSTGRES_MULTIHOST_NET") - } - - runner, err := docker.NewServiceRunner(runOpts) + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: "postgres", + ImageTag: version, + Env: []string{ + "POSTGRES_PASSWORD=" + password, + "POSTGRES_DB=" + db, + }, + Ports: []string{"5432/tcp"}, + }) if err != nil { t.Fatalf("Could not start docker Postgres: %s", err) } - svc, containerID, err := runner.StartNewService(context.Background(), addSuffix, forceLocalAddr, connectPostgres(password, repo)) + svc, err := runner.StartService(context.Background(), connectPostgres(password)) if err != nil { t.Fatalf("Could not start docker Postgres: %s", err) } - return runner, svc.Cleanup, svc.Config.URL().String(), containerID + return svc.Cleanup, svc.Config.URL().String() } -func connectPostgres(password, repo string) docker.ServiceAdapter { +func connectPostgres(password string) docker.ServiceAdapter { return 
func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { u := url.URL{ Scheme: "postgres", @@ -118,21 +65,10 @@ func connectPostgres(password, repo string) docker.ServiceAdapter { } defer db.Close() - if err = db.Ping(); err != nil { + err = db.Ping() + if err != nil { return nil, err } return docker.NewServiceURL(u), nil } } - -func StopContainer(t *testing.T, ctx context.Context, runner *docker.Runner, containerID string) { - if err := runner.Stop(ctx, containerID); err != nil { - t.Fatalf("Could not stop docker Postgres: %s", err) - } -} - -func RestartContainer(t *testing.T, ctx context.Context, runner *docker.Runner, containerID string) { - if err := runner.Restart(ctx, containerID); err != nil { - t.Fatalf("Could not restart docker Postgres: %s", err) - } -} diff --git a/helper/testhelpers/seal/sealhelper.go b/helper/testhelpers/seal/sealhelper.go index b05401f1cdbf6..4087f6fc0d926 100644 --- a/helper/testhelpers/seal/sealhelper.go +++ b/helper/testhelpers/seal/sealhelper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package sealhelper import ( @@ -74,5 +71,7 @@ func (tss *TransitSealServer) MakeSeal(t testing.T, key string) (vault.Seal, err t.Fatalf("error setting wrapper config: %v", err) } - return vault.NewAutoSeal(seal.NewAccess(transitSeal)) + return vault.NewAutoSeal(&seal.Access{ + Wrapper: transitSeal, + }) } diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 6ae51602bfdd8..f47ba435ca369 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package testhelpers import ( @@ -12,8 +9,6 @@ import ( "io/ioutil" "math/rand" "net/url" - "os" - "strings" "sync/atomic" "time" @@ -36,7 +31,7 @@ const ( GenerateRecovery ) -// GenerateRoot generates a root token on the target cluster. +// Generates a root token on the target cluster. 
func GenerateRoot(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) string { t.Helper() token, err := GenerateRootWithError(t, cluster, kind) @@ -56,9 +51,6 @@ func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, kind Generat keys = cluster.BarrierKeys } client := cluster.Cores[0].Client - oldNS := client.Namespace() - defer client.SetNamespace(oldNS) - client.ClearNamespace() var err error var status *api.GenerateRootStatusResponse @@ -180,10 +172,6 @@ func AttemptUnsealCore(c *vault.TestCluster, core *vault.TestClusterCore) error } client := core.Client - oldNS := client.Namespace() - defer client.SetNamespace(oldNS) - client.ClearNamespace() - client.Sys().ResetUnsealProcess() for j := 0; j < len(c.BarrierKeys); j++ { statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j])) @@ -252,10 +240,7 @@ func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestCluste t.Helper() for i := 0; i < 60; i++ { for _, core := range cluster.Cores { - oldNS := core.Client.Namespace() - core.Client.ClearNamespace() leaderResp, err := core.Client.Sys().Leader() - core.Client.SetNamespace(oldNS) if err != nil { t.Fatal(err) } @@ -273,10 +258,7 @@ func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestCl t.Helper() cores := make([]*vault.TestClusterCore, 0, 2) for _, core := range cluster.Cores { - oldNS := core.Client.Namespace() - core.Client.ClearNamespace() leaderResp, err := core.Client.Sys().Leader() - core.Client.SetNamespace(oldNS) if err != nil { t.Fatal(err) } @@ -478,7 +460,7 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { leaderInfos := []*raft.LeaderJoinInfo{ { LeaderAPIAddr: leader.Client.Address(), - TLSConfig: leader.TLSConfig(), + TLSConfig: leader.TLSConfig, }, } @@ -612,16 +594,18 @@ func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} { t.Helper() stopCh := make(chan struct{}) + ticker := time.NewTicker(time.Second) + var 
err error go func() { - ticker := time.NewTicker(time.Second) - defer ticker.Stop() for { select { case <-stopCh: + ticker.Stop() + stopCh <- struct{}{} return case <-ticker.C: - err := client.Sys().Mount("foo", &api.MountInput{ + err = client.Sys().Mount("foo", &api.MountInput{ Type: "kv", Options: map[string]string{ "version": "1", @@ -781,21 +765,6 @@ func SetNonRootToken(client *api.Client) error { return nil } -// RetryUntilAtCadence runs f until it returns a nil result or the timeout is reached. -// If a nil result hasn't been obtained by timeout, calls t.Fatal. -func RetryUntilAtCadence(t testing.T, timeout, sleepTime time.Duration, f func() error) { - t.Helper() - deadline := time.Now().Add(timeout) - var err error - for time.Now().Before(deadline) { - if err = f(); err == nil { - return - } - time.Sleep(sleepTime) - } - t.Fatalf("did not complete before deadline, err: %v", err) -} - // RetryUntil runs f until it returns a nil result or the timeout is reached. // If a nil result hasn't been obtained by timeout, calls t.Fatal. func RetryUntil(t testing.T, timeout time.Duration, f func() error) { @@ -971,7 +940,7 @@ func GetTOTPCodeFromEngine(t testing.T, client *api.Client, enginePath string) s // SetupLoginMFATOTP setups up a TOTP MFA using some basic configuration and // returns all relevant information to the client. 
-func SetupLoginMFATOTP(t testing.T, client *api.Client, methodName string, waitPeriod int) (*api.Client, string, string) { +func SetupLoginMFATOTP(t testing.T, client *api.Client) (*api.Client, string, string) { t.Helper() // Mount the totp secrets engine SetupTOTPMount(t, client) @@ -985,14 +954,13 @@ func SetupLoginMFATOTP(t testing.T, client *api.Client, methodName string, waitP // Configure a default TOTP method totpConfig := map[string]interface{}{ "issuer": "yCorp", - "period": waitPeriod, + "period": 20, "algorithm": "SHA256", "digits": 6, "skew": 1, "key_size": 20, "qr_size": 200, "max_validation_attempts": 5, - "method_name": methodName, } methodID := SetupTOTPMethod(t, client, totpConfig) @@ -1006,50 +974,3 @@ func SetupLoginMFATOTP(t testing.T, client *api.Client, methodName string, waitP SetupMFALoginEnforcement(t, client, enforcementConfig) return entityClient, entityID, methodID } - -func SkipUnlessEnvVarsSet(t testing.T, envVars []string) { - t.Helper() - - for _, i := range envVars { - if os.Getenv(i) == "" { - t.Skipf("%s must be set for this test to run", strings.Join(envVars, " ")) - } - } -} - -// WaitForNodesExcludingSelectedStandbys is variation on WaitForActiveNodeAndStandbys. -// It waits for the active node before waiting for standby nodes, however -// it will not wait for cores with indexes that match those specified as arguments. -// Whilst you could specify index 0 which is likely to be the leader node, the function -// checks for the leader first regardless of the indexes to skip, so it would be redundant to do so. -// The intention/use case for this function is to allow a cluster to start and become active with one -// or more nodes not joined, so that we can test scenarios where a node joins later. -// e.g. 4 nodes in the cluster, only 3 nodes in cluster 'active', 1 node can be joined later in tests. 
-func WaitForNodesExcludingSelectedStandbys(t testing.T, cluster *vault.TestCluster, indexesToSkip ...int) { - WaitForActiveNode(t, cluster) - - contains := func(elems []int, e int) bool { - for _, v := range elems { - if v == e { - return true - } - } - - return false - } - for i, core := range cluster.Cores { - if contains(indexesToSkip, i) { - continue - } - - if standby, _ := core.Core.Standby(); standby { - WaitForStandbyNode(t, core) - } - } -} - -// IsLocalOrRegressionTests returns true when the tests are running locally (not in CI), or when -// the regression test env var (VAULT_REGRESSION_TESTS) is provided. -func IsLocalOrRegressionTests() bool { - return os.Getenv("CI") == "" || os.Getenv("VAULT_REGRESSION_TESTS") == "true" -} diff --git a/helper/testhelpers/testhelpers_oss.go b/helper/testhelpers/testhelpers_oss.go index fc55e9b52c736..912d50fdec3b0 100644 --- a/helper/testhelpers/testhelpers_oss.go +++ b/helper/testhelpers/testhelpers_oss.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !enterprise package testhelpers diff --git a/helper/testhelpers/teststorage/consul/consul.go b/helper/testhelpers/teststorage/consul/consul.go index bfea5ddbb4aeb..47ec99f294b76 100644 --- a/helper/testhelpers/teststorage/consul/consul.go +++ b/helper/testhelpers/teststorage/consul/consul.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( diff --git a/helper/testhelpers/teststorage/teststorage.go b/helper/testhelpers/teststorage/teststorage.go index f065e187d2c28..7a2f220ed7e82 100644 --- a/helper/testhelpers/teststorage/teststorage.go +++ b/helper/testhelpers/teststorage/teststorage.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package teststorage import ( diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index ff9fd2b55668b..257a5a0184c5f 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package teststorage import ( diff --git a/helper/timeutil/timeutil.go b/helper/timeutil/timeutil.go index 16f8343513e90..a65d3cf908bcb 100644 --- a/helper/timeutil/timeutil.go +++ b/helper/timeutil/timeutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package timeutil import ( @@ -142,26 +139,3 @@ func SkipAtEndOfMonth(t *testing.T) { t.Skip("too close to end of month") } } - -// This interface allows unit tests to substitute in a simulated Clock. -type Clock interface { - Now() time.Time - NewTicker(time.Duration) *time.Ticker - NewTimer(time.Duration) *time.Timer -} - -type DefaultClock struct{} - -var _ Clock = (*DefaultClock)(nil) - -func (_ DefaultClock) Now() time.Time { - return time.Now() -} - -func (_ DefaultClock) NewTicker(d time.Duration) *time.Ticker { - return time.NewTicker(d) -} - -func (_ DefaultClock) NewTimer(d time.Duration) *time.Timer { - return time.NewTimer(d) -} diff --git a/helper/timeutil/timeutil_test.go b/helper/timeutil/timeutil_test.go index b9ccdbd5ba226..5cef2d2061fe9 100644 --- a/helper/timeutil/timeutil_test.go +++ b/helper/timeutil/timeutil_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package timeutil import ( diff --git a/helper/useragent/useragent.go b/helper/useragent/useragent.go deleted file mode 100644 index a16b8716e352e..0000000000000 --- a/helper/useragent/useragent.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package useragent - -import ( - "fmt" - "runtime" - - "github.com/hashicorp/vault/version" -) - -var ( - // projectURL is the project URL. - projectURL = "https://www.vaultproject.io/" - - // rt is the runtime - variable for tests. - rt = runtime.Version() - - // versionFunc is the func that returns the current version. This is a - // function to take into account the different build processes and distinguish - // between enterprise and oss builds. - versionFunc = func() string { - return version.GetVersion().VersionNumber() - } -) - -// String returns the consistent user-agent string for Vault. -// -// e.g. Vault/0.10.4 (+https://www.vaultproject.io/; go1.10.1) -func String() string { - return fmt.Sprintf("Vault/%s (+%s; %s)", - versionFunc(), projectURL, rt) -} - -// AgentString returns the consistent user-agent string for Vault Agent. -// -// e.g. Vault Agent/0.10.4 (+https://www.vaultproject.io/; go1.10.1) -func AgentString() string { - return fmt.Sprintf("Vault Agent/%s (+%s; %s)", - versionFunc(), projectURL, rt) -} - -// AgentTemplatingString returns the consistent user-agent string for Vault Agent Templating. -// -// e.g. Vault Agent Templating/0.10.4 (+https://www.vaultproject.io/; go1.10.1) -func AgentTemplatingString() string { - return fmt.Sprintf("Vault Agent Templating/%s (+%s; %s)", - versionFunc(), projectURL, rt) -} - -// AgentProxyString returns the consistent user-agent string for Vault Agent API Proxying. -// -// e.g. Vault Agent API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1) -func AgentProxyString() string { - return fmt.Sprintf("Vault Agent API Proxy/%s (+%s; %s)", - versionFunc(), projectURL, rt) -} - -// AgentProxyStringWithProxiedUserAgent returns the consistent user-agent -// string for Vault Agent API Proxying, keeping the User-Agent of the proxied -// client as an extension to this UserAgent -// -// e.g. 
Vault Agent API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1); proxiedUserAgent -func AgentProxyStringWithProxiedUserAgent(proxiedUserAgent string) string { - return fmt.Sprintf("Vault Agent API Proxy/%s (+%s; %s); %s", - versionFunc(), projectURL, rt, proxiedUserAgent) -} - -// AgentAutoAuthString returns the consistent user-agent string for Vault Agent Auto-Auth. -// -// e.g. Vault Agent Auto-Auth/0.10.4 (+https://www.vaultproject.io/; go1.10.1) -func AgentAutoAuthString() string { - return fmt.Sprintf("Vault Agent Auto-Auth/%s (+%s; %s)", - versionFunc(), projectURL, rt) -} - -// ProxyString returns the consistent user-agent string for Vault Proxy. -// -// e.g. Vault Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1) -func ProxyString() string { - return fmt.Sprintf("Vault Proxy/%s (+%s; %s)", - versionFunc(), projectURL, rt) -} - -// ProxyAPIProxyString returns the consistent user-agent string for Vault Proxy API Proxying. -// -// e.g. Vault Proxy API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1) -func ProxyAPIProxyString() string { - return fmt.Sprintf("Vault Proxy API Proxy/%s (+%s; %s)", - versionFunc(), projectURL, rt) -} - -// ProxyStringWithProxiedUserAgent returns the consistent user-agent -// string for Vault Proxy API Proxying, keeping the User-Agent of the proxied -// client as an extension to this UserAgent -// -// e.g. Vault Proxy API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1); proxiedUserAgent -func ProxyStringWithProxiedUserAgent(proxiedUserAgent string) string { - return fmt.Sprintf("Vault Proxy API Proxy/%s (+%s; %s); %s", - versionFunc(), projectURL, rt, proxiedUserAgent) -} - -// ProxyAutoAuthString returns the consistent user-agent string for Vault Agent Auto-Auth. -// -// e.g. 
Vault Proxy Auto-Auth/0.10.4 (+https://www.vaultproject.io/; go1.10.1) -func ProxyAutoAuthString() string { - return fmt.Sprintf("Vault Proxy Auto-Auth/%s (+%s; %s)", - versionFunc(), projectURL, rt) -} diff --git a/helper/useragent/useragent_test.go b/helper/useragent/useragent_test.go deleted file mode 100644 index af5e2f0622745..0000000000000 --- a/helper/useragent/useragent_test.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package useragent - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestUserAgent(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - - act := String() - - exp := "Vault/1.2.3 (+https://vault-test.com; go5.0)" - require.Equal(t, exp, act) -} - -// TestUserAgent_VaultAgent tests the AgentString() function works -// as expected -func TestUserAgent_VaultAgent(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - - act := AgentString() - - exp := "Vault Agent/1.2.3 (+https://vault-test.com; go5.0)" - require.Equal(t, exp, act) -} - -// TestUserAgent_VaultAgentTemplating tests the AgentTemplatingString() function works -// as expected -func TestUserAgent_VaultAgentTemplating(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - - act := AgentTemplatingString() - - exp := "Vault Agent Templating/1.2.3 (+https://vault-test.com; go5.0)" - require.Equal(t, exp, act) -} - -// TestUserAgent_VaultAgentProxy tests the AgentProxyString() function works -// as expected -func TestUserAgent_VaultAgentProxy(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - - act := AgentProxyString() - - exp := "Vault Agent API Proxy/1.2.3 (+https://vault-test.com; go5.0)" - require.Equal(t, exp, act) -} - -// 
TestUserAgent_VaultAgentProxyWithProxiedUserAgent tests the AgentProxyStringWithProxiedUserAgent() -// function works as expected -func TestUserAgent_VaultAgentProxyWithProxiedUserAgent(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - userAgent := "my-user-agent" - - act := AgentProxyStringWithProxiedUserAgent(userAgent) - - exp := "Vault Agent API Proxy/1.2.3 (+https://vault-test.com; go5.0); my-user-agent" - require.Equal(t, exp, act) -} - -// TestUserAgent_VaultAgentAutoAuth tests the AgentAutoAuthString() function works -// as expected -func TestUserAgent_VaultAgentAutoAuth(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - - act := AgentAutoAuthString() - - exp := "Vault Agent Auto-Auth/1.2.3 (+https://vault-test.com; go5.0)" - require.Equal(t, exp, act) -} - -// TestUserAgent_VaultProxy tests the ProxyString() function works -// as expected -func TestUserAgent_VaultProxy(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - - act := ProxyString() - - exp := "Vault Proxy/1.2.3 (+https://vault-test.com; go5.0)" - require.Equal(t, exp, act) -} - -// TestUserAgent_VaultProxyAPIProxy tests the ProxyAPIProxyString() function works -// as expected -func TestUserAgent_VaultProxyAPIProxy(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - - act := ProxyAPIProxyString() - - exp := "Vault Proxy API Proxy/1.2.3 (+https://vault-test.com; go5.0)" - require.Equal(t, exp, act) -} - -// TestUserAgent_VaultProxyWithProxiedUserAgent tests the ProxyStringWithProxiedUserAgent() -// function works as expected -func TestUserAgent_VaultProxyWithProxiedUserAgent(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - userAgent := 
"my-user-agent" - - act := ProxyStringWithProxiedUserAgent(userAgent) - - exp := "Vault Proxy API Proxy/1.2.3 (+https://vault-test.com; go5.0); my-user-agent" - require.Equal(t, exp, act) -} - -// TestUserAgent_VaultProxyAutoAuth tests the ProxyAPIProxyString() function works -// as expected -func TestUserAgent_VaultProxyAutoAuth(t *testing.T) { - projectURL = "https://vault-test.com" - rt = "go5.0" - versionFunc = func() string { return "1.2.3" } - - act := ProxyAutoAuthString() - - exp := "Vault Proxy Auto-Auth/1.2.3 (+https://vault-test.com; go5.0)" - require.Equal(t, exp, act) -} diff --git a/helper/versions/version.go b/helper/versions/version.go index 9eb8077b89230..6bbd186984148 100644 --- a/helper/versions/version.go +++ b/helper/versions/version.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package versions import ( @@ -11,7 +8,7 @@ import ( semver "github.com/hashicorp/go-version" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/version" + "github.com/hashicorp/vault/sdk/version" ) const ( diff --git a/helper/versions/version_test.go b/helper/versions/version_test.go index 85b46cdd3fbf6..cc1b3e1c20f2f 100644 --- a/helper/versions/version_test.go +++ b/helper/versions/version_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package versions import "testing" diff --git a/http/assets.go b/http/assets.go index b60a594942c7d..c401f94910872 100644 --- a/http/assets.go +++ b/http/assets.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build ui package http diff --git a/http/assets_stub.go b/http/assets_stub.go index e1b4daf3991e1..1989a09d9860c 100644 --- a/http/assets_stub.go +++ b/http/assets_stub.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - //go:build !ui package http diff --git a/http/auth_token_test.go b/http/auth_token_test.go index d96e18383ac71..552a32cbdd937 100644 --- a/http/auth_token_test.go +++ b/http/auth_token_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/cors.go b/http/cors.go index 7e8c311e624f5..74cfeeaef072e 100644 --- a/http/cors.go +++ b/http/cors.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/custom_header_test.go b/http/custom_header_test.go index 289379a84cb6d..8c204584aeb78 100644 --- a/http/custom_header_test.go +++ b/http/custom_header_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/events.go b/http/events.go deleted file mode 100644 index 072fcd60ea04a..0000000000000 --- a/http/events.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package http - -import ( - "context" - "errors" - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/eventbus" - "nhooyr.io/websocket" -) - -type eventSubscribeArgs struct { - ctx context.Context - logger hclog.Logger - events *eventbus.EventBus - ns *namespace.Namespace - pattern string - conn *websocket.Conn - json bool -} - -// handleEventsSubscribeWebsocket runs forever, returning a websocket error code and reason -// only if the connection closes or there was an error. 
-func handleEventsSubscribeWebsocket(args eventSubscribeArgs) (websocket.StatusCode, string, error) { - ctx := args.ctx - logger := args.logger - ch, cancel, err := args.events.Subscribe(ctx, args.ns, args.pattern) - if err != nil { - logger.Info("Error subscribing", "error", err) - return websocket.StatusUnsupportedData, "Error subscribing", nil - } - defer cancel() - - for { - select { - case <-ctx.Done(): - logger.Info("Websocket context is done, closing the connection") - return websocket.StatusNormalClosure, "", nil - case message := <-ch: - logger.Debug("Sending message to websocket", "message", message.Payload) - var messageBytes []byte - var messageType websocket.MessageType - if args.json { - var ok bool - messageBytes, ok = message.Format("cloudevents-json") - if !ok { - logger.Warn("Could not get cloudevents JSON format") - return 0, "", errors.New("could not get cloudevents JSON format") - } - messageType = websocket.MessageText - } else { - messageBytes, err = proto.Marshal(message.Payload.(*logical.EventReceived)) - messageType = websocket.MessageBinary - } - if err != nil { - logger.Warn("Could not serialize websocket event", "error", err) - return 0, "", err - } - err = args.conn.Write(ctx, messageType, messageBytes) - if err != nil { - return 0, "", err - } - } - } -} - -func handleEventsSubscribe(core *vault.Core, req *logical.Request) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - logger := core.Logger().Named("events-subscribe") - logger.Debug("Got request to", "url", r.URL, "version", r.Proto) - - ctx := r.Context() - - // ACL check - _, _, err := core.CheckToken(ctx, req, false) - if err != nil { - if errors.Is(err, logical.ErrPermissionDenied) { - respondError(w, http.StatusForbidden, logical.ErrPermissionDenied) - return - } - logger.Debug("Error validating token", "error", err) - respondError(w, http.StatusInternalServerError, fmt.Errorf("error validating token")) - return - } - - ns, err := 
namespace.FromContext(ctx) - if err != nil { - logger.Info("Could not find namespace", "error", err) - respondError(w, http.StatusInternalServerError, fmt.Errorf("could not find namespace")) - return - } - - prefix := "/v1/sys/events/subscribe/" - if ns.ID != namespace.RootNamespaceID { - prefix = fmt.Sprintf("/v1/%ssys/events/subscribe/", ns.Path) - } - pattern := strings.TrimSpace(strings.TrimPrefix(r.URL.Path, prefix)) - if pattern == "" { - respondError(w, http.StatusBadRequest, fmt.Errorf("did not specify eventType to subscribe to")) - return - } - - json := false - jsonRaw := r.URL.Query().Get("json") - if jsonRaw != "" { - var err error - json, err = strconv.ParseBool(jsonRaw) - if err != nil { - respondError(w, http.StatusBadRequest, fmt.Errorf("invalid parameter for JSON: %v", jsonRaw)) - return - } - } - - conn, err := websocket.Accept(w, r, nil) - if err != nil { - logger.Info("Could not accept as websocket", "error", err) - respondError(w, http.StatusInternalServerError, fmt.Errorf("could not accept as websocket")) - return - } - - // we don't expect any incoming messages - ctx = conn.CloseRead(ctx) - // start the pinger - go func() { - for { - time.Sleep(30 * time.Second) // not too aggressive, but keep the HTTP connection alive - err := conn.Ping(ctx) - if err != nil { - return - } - } - }() - - closeStatus, closeReason, err := handleEventsSubscribeWebsocket(eventSubscribeArgs{ctx, logger, core.Events(), ns, pattern, conn, json}) - if err != nil { - closeStatus = websocket.CloseStatus(err) - if closeStatus == -1 { - closeStatus = websocket.StatusInternalError - } - closeReason = fmt.Sprintf("Internal error: %v", err) - logger.Debug("Error from websocket handler", "error", err) - } - // Close() will panic if the reason is greater than this length - if len(closeReason) > 123 { - logger.Debug("Truncated close reason", "closeReason", closeReason) - closeReason = closeReason[:123] - } - err = conn.Close(closeStatus, closeReason) - if err != nil { - 
logger.Debug("Error closing websocket", "error", err) - } - }) -} diff --git a/http/events_test.go b/http/events_test.go deleted file mode 100644 index b5ce0a1a3581c..0000000000000 --- a/http/events_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strings" - "sync/atomic" - "testing" - "time" - - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "nhooyr.io/websocket" -) - -// TestEventsSubscribe tests the websocket endpoint for subscribing to events -// by generating some events. -func TestEventsSubscribe(t *testing.T) { - core := vault.TestCore(t) - ln, addr := TestServer(t, core) - defer ln.Close() - - // unseal the core - keys, token := vault.TestCoreInit(t, core) - for _, key := range keys { - _, err := core.Unseal(key) - if err != nil { - t.Fatal(err) - } - } - - stop := atomic.Bool{} - - const eventType = "abc" - - // send some events - go func() { - for !stop.Load() { - id, err := uuid.GenerateUUID() - if err != nil { - core.Logger().Info("Error generating UUID, exiting sender", "error", err) - } - pluginInfo := &logical.EventPluginInfo{ - MountPath: "secret", - } - err = core.Events().SendInternal(namespace.RootContext(context.Background()), namespace.RootNamespace, pluginInfo, logical.EventType(eventType), &logical.EventData{ - Id: id, - Metadata: nil, - EntityIds: nil, - Note: "testing", - }) - if err != nil { - core.Logger().Info("Error sending event, exiting sender", "error", err) - } - time.Sleep(100 * time.Millisecond) - } - }() - - t.Cleanup(func() { - stop.Store(true) - }) - - ctx := context.Background() - wsAddr := strings.Replace(addr, "http", "ws", 1) - - testCases := []struct { - json bool - }{{true}, {false}} - - for _, testCase := range testCases { - url := 
fmt.Sprintf("%s/v1/sys/events/subscribe/%s?json=%v", wsAddr, eventType, testCase.json) - conn, _, err := websocket.Dial(ctx, url, &websocket.DialOptions{ - HTTPHeader: http.Header{"x-vault-token": []string{token}}, - }) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - conn.Close(websocket.StatusNormalClosure, "") - }) - - _, msg, err := conn.Read(ctx) - if err != nil { - t.Fatal(err) - } - if testCase.json { - event := map[string]interface{}{} - err = json.Unmarshal(msg, &event) - if err != nil { - t.Fatal(err) - } - t.Log(string(msg)) - data := event["data"].(map[string]interface{}) - if actualType := data["event_type"].(string); actualType != eventType { - t.Fatalf("Expeced event type %s, got %s", eventType, actualType) - } - pluginInfo, ok := data["plugin_info"].(map[string]interface{}) - if !ok || pluginInfo == nil { - t.Fatalf("No plugin_info object: %v", data) - } - mountPath, ok := pluginInfo["mount_path"].(string) - if !ok || mountPath != "secret" { - t.Fatalf("Wrong mount_path: %v", data) - } - innerEvent := data["event"].(map[string]interface{}) - if innerEvent["id"].(string) != event["id"].(string) { - t.Fatalf("IDs don't match, expected %s, got %s", innerEvent["id"].(string), event["id"].(string)) - } - if innerEvent["note"].(string) != "testing" { - t.Fatalf("Expected 'testing', got %s", innerEvent["note"].(string)) - } - - checkRequiredCloudEventsFields(t, event) - } - } -} - -func checkRequiredCloudEventsFields(t *testing.T, event map[string]interface{}) { - t.Helper() - for _, attr := range []string{"id", "source", "specversion", "type"} { - if v, ok := event[attr]; !ok { - t.Errorf("Missing attribute %s", attr) - } else if str, ok := v.(string); !ok { - t.Errorf("Expected %s to be string but got %T", attr, v) - } else if str == "" { - t.Errorf("%s was empty string", attr) - } - } -} - -// TestEventsSubscribeAuth tests that unauthenticated and unauthorized subscriptions -// fail correctly. 
-func TestEventsSubscribeAuth(t *testing.T) { - core := vault.TestCore(t) - ln, addr := TestServer(t, core) - defer ln.Close() - - // unseal the core - keys, root := vault.TestCoreInit(t, core) - for _, key := range keys { - _, err := core.Unseal(key) - if err != nil { - t.Fatal(err) - } - } - - var nonPrivilegedToken string - // Fetch a valid non privileged token. - { - config := api.DefaultConfig() - config.Address = addr - - client, err := api.NewClient(config) - if err != nil { - t.Fatal(err) - } - client.SetToken(root) - - secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{Policies: []string{"default"}}) - if err != nil { - t.Fatal(err) - } - if secret.Auth.ClientToken == "" { - t.Fatal("Failed to fetch a non privileged token") - } - nonPrivilegedToken = secret.Auth.ClientToken - } - - ctx := context.Background() - wsAddr := strings.Replace(addr, "http", "ws", 1) - - // Get a 403 with no token. - _, resp, err := websocket.Dial(ctx, wsAddr+"/v1/sys/events/subscribe/abc", nil) - if err == nil { - t.Error("Expected websocket error but got none") - } - if resp == nil || resp.StatusCode != http.StatusForbidden { - t.Errorf("Expected 403 but got %+v", resp) - } - - // Get a 403 with a non privileged token. - _, resp, err = websocket.Dial(ctx, wsAddr+"/v1/sys/events/subscribe/abc", &websocket.DialOptions{ - HTTPHeader: http.Header{"x-vault-token": []string{nonPrivilegedToken}}, - }) - if err == nil { - t.Error("Expected websocket error but got none") - } - if resp == nil || resp.StatusCode != http.StatusForbidden { - t.Errorf("Expected 403 but got %+v", resp) - } -} diff --git a/http/forwarded_for_test.go b/http/forwarded_for_test.go index 89bc62acc2653..9323f5bf1c728 100644 --- a/http/forwarded_for_test.go +++ b/http/forwarded_for_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -49,7 +46,7 @@ func TestHandler_XForwardedFor(t *testing.T) { } cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: HandlerFunc(testHandler), + HandlerFunc: testHandler, }) cluster.Start() defer cluster.Cleanup() @@ -92,7 +89,7 @@ func TestHandler_XForwardedFor(t *testing.T) { } cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: HandlerFunc(testHandler), + HandlerFunc: testHandler, }) cluster.Start() defer cluster.Cleanup() @@ -128,7 +125,7 @@ func TestHandler_XForwardedFor(t *testing.T) { } cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: HandlerFunc(testHandler), + HandlerFunc: testHandler, }) cluster.Start() defer cluster.Cleanup() @@ -162,7 +159,7 @@ func TestHandler_XForwardedFor(t *testing.T) { } cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: HandlerFunc(testHandler), + HandlerFunc: testHandler, }) cluster.Start() defer cluster.Cleanup() @@ -196,7 +193,7 @@ func TestHandler_XForwardedFor(t *testing.T) { } cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: HandlerFunc(testHandler), + HandlerFunc: testHandler, }) cluster.Start() defer cluster.Cleanup() @@ -233,7 +230,7 @@ func TestHandler_XForwardedFor(t *testing.T) { } cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: HandlerFunc(testHandler), + HandlerFunc: testHandler, }) cluster.Start() defer cluster.Cleanup() diff --git a/http/forwarding_bench_test.go b/http/forwarding_bench_test.go index 0c3f5e2a286d2..f7334058d6a8f 100644 --- a/http/forwarding_bench_test.go +++ b/http/forwarding_bench_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -48,7 +45,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { host := fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[0].Listeners[0].Address.Port) transport := &http.Transport{ - TLSClientConfig: cores[0].TLSConfig(), + TLSClientConfig: cores[0].TLSConfig, } if err := http2.ConfigureTransport(transport); err != nil { b.Fatal(err) diff --git a/http/forwarding_test.go b/http/forwarding_test.go index 51cc2c0e01812..f0225a42230e0 100644 --- a/http/forwarding_test.go +++ b/http/forwarding_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -56,7 +53,7 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) { for _, addr := range addrs { config := api.DefaultConfig() config.Address = addr - config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig() + config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig client, err := api.NewClient(config) if err != nil { @@ -104,7 +101,7 @@ func TestHTTP_Fallback_Disabled(t *testing.T) { for _, addr := range addrs { config := api.DefaultConfig() config.Address = addr - config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig() + config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig client, err := api.NewClient(config) if err != nil { @@ -164,7 +161,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) } transport := &http.Transport{ - TLSClientConfig: cores[0].TLSConfig(), + TLSClientConfig: cores[0].TLSConfig, } if err := http2.ConfigureTransport(transport); err != nil { t.Fatal(err) @@ -462,7 +459,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { vault.TestWaitActive(t, core) transport := cleanhttp.DefaultTransport() - transport.TLSClientConfig = cores[0].TLSConfig() + transport.TLSClientConfig = cores[0].TLSConfig if err := 
http2.ConfigureTransport(transport); err != nil { t.Fatal(err) } @@ -514,7 +511,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { // be to a different address transport = cleanhttp.DefaultTransport() // i starts at zero but cores in addrs start at 1 - transport.TLSClientConfig = cores[i+1].TLSConfig() + transport.TLSClientConfig = cores[i+1].TLSConfig if err := http2.ConfigureTransport(transport); err != nil { t.Fatal(err) } diff --git a/http/handler.go b/http/handler.go index a91a0514ffd0a..5bf848db2d678 100644 --- a/http/handler.go +++ b/http/handler.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -121,25 +118,9 @@ func init() { }) } -type HandlerAnchor struct{} - -func (h HandlerAnchor) Handler(props *vault.HandlerProperties) http.Handler { - return handler(props) -} - -var Handler vault.HandlerHandler = HandlerAnchor{} - -type HandlerFunc func(props *vault.HandlerProperties) http.Handler - -func (h HandlerFunc) Handler(props *vault.HandlerProperties) http.Handler { - return h(props) -} - -var _ vault.HandlerHandler = HandlerFunc(func(props *vault.HandlerProperties) http.Handler { return nil }) - -// handler returns an http.Handler for the API. This can be used on +// Handler returns an http.Handler for the API. This can be used on // its own to mount the Vault API within another web server. 
-func handler(props *vault.HandlerProperties) http.Handler { +func Handler(props *vault.HandlerProperties) http.Handler { core := props.Core // Create the muxer to handle the actual endpoints @@ -178,7 +159,6 @@ func handler(props *vault.HandlerProperties) http.Handler { mux.Handle("/v1/sys/storage/raft/bootstrap", handleSysRaftBootstrap(core)) mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core)) mux.Handle("/v1/sys/internal/ui/feature-flags", handleSysInternalFeatureFlags(core)) - for _, path := range injectDataIntoTopRoutes { mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core))) } @@ -352,8 +332,8 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerPr // Start with the request context ctx := r.Context() var cancelFunc context.CancelFunc - // Add our timeout, but not for the monitor or events endpoints, as they are streaming - if strings.HasSuffix(r.URL.Path, "sys/monitor") || strings.Contains(r.URL.Path, "sys/events") { + // Add our timeout, but not for the monitor endpoint, as it's streaming + if strings.HasSuffix(r.URL.Path, "sys/monitor") { ctx, cancelFunc = context.WithCancel(ctx) } else { ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration) @@ -832,9 +812,16 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle return } path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) - if !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path) { + switch { + case !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path): handler.ServeHTTP(w, r) return + case strings.HasPrefix(path, "auth/token/create/"): + isBatch, err := core.IsBatchTokenCreationRequest(r.Context(), path) + if err == nil && isBatch { + handler.ServeHTTP(w, r) + return + } } } @@ -1191,10 +1178,6 @@ func respondError(w http.ResponseWriter, status int, err error) { logical.RespondError(w, status, err) } -func respondErrorAndData(w http.ResponseWriter, 
status int, data interface{}, err error) { - logical.RespondErrorAndData(w, status, data, err) -} - func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool { statusCode, newErr := logical.RespondErrorCommon(req, resp, err) if newErr == nil && statusCode == 0 { @@ -1210,12 +1193,6 @@ func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logic return true } - if resp != nil { - if data := resp.Data["data"]; data != nil { - respondErrorAndData(w, statusCode, data, newErr) - return true - } - } respondError(w, statusCode, newErr) return true } diff --git a/http/handler_test.go b/http/handler_test.go index 244fe41772575..49565b41e2356 100644 --- a/http/handler_test.go +++ b/http/handler_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/help.go b/http/help.go index 64085f1e38b6b..7ec6fb6131aae 100644 --- a/http/help.go +++ b/http/help.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/help_test.go b/http/help_test.go index d02c26a9521e9..ec9a67dd1c58c 100644 --- a/http/help_test.go +++ b/http/help_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/http_test.go b/http/http_test.go index 5e51ce7d0fe31..692aef0d82877 100644 --- a/http/http_test.go +++ b/http/http_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/logical.go b/http/logical.go index 8a681d9107d48..6cdf6bb071105 100644 --- a/http/logical.go +++ b/http/logical.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -16,8 +13,7 @@ import ( "strings" "time" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/experiments" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" @@ -183,10 +179,8 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. } data = parseQuery(r.URL.Query()) - case "HEAD": - op = logical.HeaderOperation - data = parseQuery(r.URL.Query()) - case "OPTIONS": + + case "OPTIONS", "HEAD": default: return nil, nil, http.StatusMethodNotAllowed, nil } @@ -352,24 +346,6 @@ func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForw return } - // Websockets need to be handled at HTTP layer instead of logical requests. - if core.IsExperimentEnabled(experiments.VaultExperimentEventsAlpha1) { - ns, err := namespace.FromContext(r.Context()) - if err != nil { - respondError(w, http.StatusInternalServerError, err) - return - } - nsPath := ns.Path - if ns.ID == namespace.RootNamespaceID { - nsPath = "" - } - if strings.HasPrefix(r.URL.Path, fmt.Sprintf("/v1/%ssys/events/subscribe/", nsPath)) { - handler := handleEventsSubscribe(core, req) - handler.ServeHTTP(w, r) - return - } - } - // Make the internal request. We attach the connection info // as well in case this is an authentication request that requires // it. Vault core handles stripping this if we need to. This also diff --git a/http/logical_test.go b/http/logical_test.go index a9ccdff0525f1..fc6fc765811e5 100644 --- a/http/logical_test.go +++ b/http/logical_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -11,7 +8,6 @@ import ( "io/ioutil" "net/http" "net/http/httptest" - "os" "reflect" "strconv" "strings" @@ -22,7 +18,6 @@ import ( "github.com/hashicorp/vault/api" auditFile "github.com/hashicorp/vault/builtin/audit/file" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" @@ -479,10 +474,13 @@ func TestLogical_RespondWithStatusCode(t *testing.T) { func TestLogical_Audit_invalidWrappingToken(t *testing.T) { // Create a noop audit backend - noop := corehelpers.TestNoopAudit(t, nil) + var noop *vault.NoopAudit c, _, root := vault.TestCoreUnsealedWithConfig(t, &vault.CoreConfig{ AuditBackends: map[string]audit.Factory{ "noop": func(ctx context.Context, config *audit.BackendConfig) (audit.Backend, error) { + noop = &vault.NoopAudit{ + Config: config, + } return noop, nil }, }, @@ -638,7 +636,7 @@ func TestLogical_AuditPort(t *testing.T) { // workaround kv-v2 initialization upgrade errors numFailures := 0 - corehelpers.RetryUntil(t, 10*time.Second, func() error { + vault.RetryUntil(t, 10*time.Second, func() error { resp, err := c.Logical().Write("kv/data/foo", writeData) if err != nil { if strings.Contains(err.Error(), "Upgrading from non-versioned to versioned data") { @@ -761,180 +759,3 @@ func TestLogical_ErrRelativePath(t *testing.T) { t.Errorf("expected response for write to include %q", logical.ErrRelativePath.Error()) } } - -func testBuiltinPluginMetadataAuditLog(t *testing.T, log map[string]interface{}, expectedMountClass string) { - if mountClass, ok := log["mount_class"].(string); !ok { - t.Fatalf("mount_class should be a string, not %T", log["mount_class"]) - } else if mountClass != expectedMountClass { - t.Fatalf("bad: mount_class should be %s, not %s", 
expectedMountClass, mountClass) - } - - if _, ok := log["mount_running_version"].(string); !ok { - t.Fatalf("mount_running_version should be a string, not %T", log["mount_running_version"]) - } - - if _, ok := log["mount_running_sha256"].(string); ok { - t.Fatalf("mount_running_sha256 should be nil, not %T", log["mount_running_sha256"]) - } - - if mountIsExternalPlugin, ok := log["mount_is_external_plugin"].(bool); ok && mountIsExternalPlugin { - t.Fatalf("mount_is_external_plugin should be nil or false, not %T", log["mount_is_external_plugin"]) - } -} - -// TestLogical_AuditEnabled_ShouldLogPluginMetadata_Auth tests that we have plugin metadata of a builtin auth plugin -// in audit log when it is enabled -func TestLogical_AuditEnabled_ShouldLogPluginMetadata_Auth(t *testing.T) { - coreConfig := &vault.CoreConfig{ - AuditBackends: map[string]audit.Factory{ - "file": auditFile.Factory, - }, - } - - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: Handler, - }) - - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - - core := cores[0].Core - c := cluster.Cores[0].Client - vault.TestWaitActive(t, core) - - // Enable the audit backend - tempDir := t.TempDir() - auditLogFile, err := os.CreateTemp(tempDir, "") - if err != nil { - t.Fatal(err) - } - - err = c.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ - Type: "file", - Options: map[string]string{ - "file_path": auditLogFile.Name(), - }, - }) - if err != nil { - t.Fatal(err) - } - - _, err = c.Logical().Write("auth/token/create", map[string]interface{}{ - "ttl": "10s", - }) - if err != nil { - t.Fatal(err) - } - - // Check the audit trail on request and response - decoder := json.NewDecoder(auditLogFile) - var auditRecord map[string]interface{} - for decoder.Decode(&auditRecord) == nil { - auditRequest := map[string]interface{}{} - if req, ok := auditRecord["request"]; ok { - auditRequest = req.(map[string]interface{}) - if auditRequest["path"] 
!= "auth/token/create" { - continue - } - } - testBuiltinPluginMetadataAuditLog(t, auditRequest, consts.PluginTypeCredential.String()) - - auditResponse := map[string]interface{}{} - if req, ok := auditRecord["response"]; ok { - auditRequest = req.(map[string]interface{}) - if auditResponse["path"] != "auth/token/create" { - continue - } - } - testBuiltinPluginMetadataAuditLog(t, auditResponse, consts.PluginTypeCredential.String()) - } -} - -// TestLogical_AuditEnabled_ShouldLogPluginMetadata_Secret tests that we have plugin metadata of a builtin secret plugin -// in audit log when it is enabled -func TestLogical_AuditEnabled_ShouldLogPluginMetadata_Secret(t *testing.T) { - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "kv": kv.VersionedKVFactory, - }, - AuditBackends: map[string]audit.Factory{ - "file": auditFile.Factory, - }, - } - - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: Handler, - }) - - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - - core := cores[0].Core - c := cluster.Cores[0].Client - vault.TestWaitActive(t, core) - - if err := c.Sys().Mount("kv/", &api.MountInput{ - Type: "kv-v2", - }); err != nil { - t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) - } - - // Enable the audit backend - tempDir := t.TempDir() - auditLogFile, err := os.CreateTemp(tempDir, "") - if err != nil { - t.Fatal(err) - } - - err = c.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ - Type: "file", - Options: map[string]string{ - "file_path": auditLogFile.Name(), - }, - }) - if err != nil { - t.Fatal(err) - } - - { - writeData := map[string]interface{}{ - "data": map[string]interface{}{ - "bar": "a", - }, - } - corehelpers.RetryUntil(t, 10*time.Second, func() error { - resp, err := c.Logical().Write("kv/data/foo", writeData) - if err != nil { - t.Fatalf("write request failed, err: %#v, resp: %#v\n", err, resp) - } - return nil - }) - } - - // Check the 
audit trail on request and response - decoder := json.NewDecoder(auditLogFile) - var auditRecord map[string]interface{} - for decoder.Decode(&auditRecord) == nil { - auditRequest := map[string]interface{}{} - if req, ok := auditRecord["request"]; ok { - auditRequest = req.(map[string]interface{}) - if auditRequest["path"] != "kv/data/foo" { - continue - } - } - testBuiltinPluginMetadataAuditLog(t, auditRequest, consts.PluginTypeSecrets.String()) - - auditResponse := map[string]interface{}{} - if req, ok := auditRecord["response"]; ok { - auditRequest = req.(map[string]interface{}) - if auditResponse["path"] != "kv/data/foo" { - continue - } - } - testBuiltinPluginMetadataAuditLog(t, auditResponse, consts.PluginTypeSecrets.String()) - } -} diff --git a/http/plugin_test.go b/http/plugin_test.go index b0d85be6d1734..164a3d25f664d 100644 --- a/http/plugin_test.go +++ b/http/plugin_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_audit_test.go b/http/sys_audit_test.go index 2ec4ffc30c51b..58873bfb12aa1 100644 --- a/http/sys_audit_test.go +++ b/http/sys_audit_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_auth_test.go b/http/sys_auth_test.go index 3bd0a009dda84..2d1fdf8144cb5 100644 --- a/http/sys_auth_test.go +++ b/http/sys_auth_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -11,7 +8,6 @@ import ( "time" "github.com/go-test/deep" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/versions" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/vault" @@ -489,7 +485,7 @@ func TestSysRemountAuth(t *testing.T) { // Poll until the remount succeeds var remountResp map[string]interface{} testResponseBody(t, resp, &remountResp) - corehelpers.RetryUntil(t, 5*time.Second, func() error { + vault.RetryUntil(t, 5*time.Second, func() error { resp = testHttpGet(t, token, addr+"/v1/sys/remount/status/"+remountResp["migration_id"].(string)) testResponseStatus(t, resp, 200) diff --git a/http/sys_config_cors_test.go b/http/sys_config_cors_test.go index 2f4a29a49e066..3ad0e810a2c5a 100644 --- a/http/sys_config_cors_test.go +++ b/http/sys_config_cors_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_config_state_test.go b/http/sys_config_state_test.go index c8f6c402bddb8..b8d7dc27c7539 100644 --- a/http/sys_config_state_test.go +++ b/http/sys_config_state_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -151,10 +148,7 @@ func TestSysConfigState_Sanitized(t *testing.T) { "disable_performance_standby": false, "disable_printable_check": false, "disable_sealwrap": false, - "experiments": nil, "raw_storage_endpoint": false, - "detect_deadlocks": "", - "introspection_endpoint": false, "disable_sentinel_trace": false, "enable_ui": false, "log_format": "", diff --git a/http/sys_feature_flags.go b/http/sys_feature_flags.go index 9f654b7febda6..11ece32795b77 100644 --- a/http/sys_feature_flags.go +++ b/http/sys_feature_flags.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_generate_root.go b/http/sys_generate_root.go index 7f953e4d449a7..db2da6f7f3b17 100644 --- a/http/sys_generate_root.go +++ b/http/sys_generate_root.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_generate_root_test.go b/http/sys_generate_root_test.go index dbd7796315a6c..f226d0042b1d7 100644 --- a/http/sys_generate_root_test.go +++ b/http/sys_generate_root_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -15,10 +12,8 @@ import ( "testing" "github.com/go-test/deep" - "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/pgpkeys" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/xor" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" @@ -229,11 +224,9 @@ func enableNoopAudit(t *testing.T, token string, core *vault.Core) { func testCoreUnsealedWithAudit(t *testing.T, records **[][]byte) (*vault.Core, [][]byte, string) { conf := &vault.CoreConfig{ - BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), - AuditBackends: map[string]audit.Factory{ - "noop": corehelpers.NoopAuditFactory(records), - }, + BuiltinRegistry: vault.NewMockBuiltinRegistry(), } + vault.AddNoopAudit(conf, records) core, keys, token := vault.TestCoreUnsealedWithConfig(t, conf) return core, keys, token } diff --git a/http/sys_health.go b/http/sys_health.go index b3f29d4dd5e68..fcaf4e1590999 100644 --- a/http/sys_health.go +++ b/http/sys_health.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -13,8 +10,8 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/version" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/version" ) func handleSysHealth(core *vault.Core) http.Handler { diff --git a/http/sys_health_test.go b/http/sys_health_test.go index 9761ec16c98ca..68ef11b9e2f57 100644 --- a/http/sys_health_test.go +++ b/http/sys_health_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_hostinfo_test.go b/http/sys_hostinfo_test.go index 756841e724f73..af313a382b2b2 100644 --- a/http/sys_hostinfo_test.go +++ b/http/sys_hostinfo_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_in_flight_requests.go b/http/sys_in_flight_requests.go index bdf3ebaf9d20a..b38156f38ebc4 100644 --- a/http/sys_in_flight_requests.go +++ b/http/sys_in_flight_requests.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_in_flight_requests_test.go b/http/sys_in_flight_requests_test.go index 93c92c5398355..de64d708c68f2 100644 --- a/http/sys_in_flight_requests_test.go +++ b/http/sys_in_flight_requests_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_init.go b/http/sys_init.go index 905916bef6283..ae3059462bef4 100644 --- a/http/sys_init.go +++ b/http/sys_init.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_init_test.go b/http/sys_init_test.go index 79dd275824bd1..4953c4244ce82 100644 --- a/http/sys_init_test.go +++ b/http/sys_init_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -153,11 +150,12 @@ func TestSysInit_Put_ValidateParams(t *testing.T) { } func TestSysInit_Put_ValidateParams_AutoUnseal(t *testing.T) { - testSeal, _ := seal.NewTestSeal(&seal.TestSealOpts{Name: "transit"}) + testSeal := seal.NewTestSeal(nil) autoSeal, err := vault.NewAutoSeal(testSeal) if err != nil { t.Fatal(err) } + autoSeal.SetType("transit") // Create the transit server. conf := &vault.CoreConfig{ diff --git a/http/sys_internal_test.go b/http/sys_internal_test.go index 11d9376248c56..d3c066f70c847 100644 --- a/http/sys_internal_test.go +++ b/http/sys_internal_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_leader.go b/http/sys_leader.go index 6b39c4401af3f..8c2ce21e5001d 100644 --- a/http/sys_leader.go +++ b/http/sys_leader.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_leader_test.go b/http/sys_leader_test.go index 3292b7f2407bc..974b3a7b7e40f 100644 --- a/http/sys_leader_test.go +++ b/http/sys_leader_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_lease_test.go b/http/sys_lease_test.go index 6b069ca37cedb..a254be71c211c 100644 --- a/http/sys_lease_test.go +++ b/http/sys_lease_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_metrics.go b/http/sys_metrics.go index 2bb819b34fd81..012417282e5f5 100644 --- a/http/sys_metrics.go +++ b/http/sys_metrics.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_metrics_test.go b/http/sys_metrics_test.go index 167347b4f7006..241caac2db8fa 100644 --- a/http/sys_metrics_test.go +++ b/http/sys_metrics_test.go @@ -1,14 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/armon/go-metrics" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/internalshared/configutil" @@ -19,7 +14,7 @@ func TestSysMetricsUnauthenticated(t *testing.T) { inm := metrics.NewInmemSink(10*time.Second, time.Minute) metrics.DefaultInmemSignal(inm) conf := &vault.CoreConfig{ - BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + BuiltinRegistry: vault.NewMockBuiltinRegistry(), MetricsHelper: metricsutil.NewMetricsHelper(inm, true), } core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) diff --git a/http/sys_monitor_test.go b/http/sys_monitor_test.go index 5d428c419a9cd..733862ec448f4 100644 --- a/http/sys_monitor_test.go +++ b/http/sys_monitor_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -10,16 +7,15 @@ import ( "testing" "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/vault" ) func TestSysMonitorUnknownLogLevel(t *testing.T) { - t.Parallel() - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: Handler, - NumCores: 1, - }) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{HandlerFunc: Handler}) + cluster.Start() defer cluster.Cleanup() client := cluster.Cores[0].Client @@ -41,11 +37,8 @@ func TestSysMonitorUnknownLogLevel(t *testing.T) { } func TestSysMonitorUnknownLogFormat(t *testing.T) { - t.Parallel() - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: Handler, - NumCores: 1, - }) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{HandlerFunc: Handler}) + cluster.Start() defer cluster.Cleanup() client := cluster.Cores[0].Client @@ -67,61 +60,64 @@ func TestSysMonitorUnknownLogFormat(t *testing.T) { } func TestSysMonitorStreamingLogs(t *testing.T) { - t.Parallel() - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: Handler, - NumCores: 1, + logger := log.NewInterceptLogger(&log.LoggerOptions{ + Output: log.DefaultOutput, + Level: log.Debug, + JSONFormat: logging.ParseEnvLogFormat() == logging.JSONFormat, }) + + lf := logging.ParseEnvLogFormat().String() + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{HandlerFunc: Handler, Logger: logger}) + cluster.Start() defer cluster.Cleanup() client := cluster.Cores[0].Client stopCh := testhelpers.GenerateDebugLogs(t, client) - defer close(stopCh) - for _, lf := range []string{"standard", "json"} { - t.Run(lf, func(t *testing.T) { - debugCount := 0 + debugCount := 0 + ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) + defer cancel() + logCh, err := 
client.Sys().Monitor(ctx, "DEBUG", lf) + if err != nil { + t.Fatal(err) + } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + type jsonlog struct { + Level string `json:"@level"` + Message string `json:"@message"` + TimeStamp string `json:"@timestamp"` + } + jsonLog := &jsonlog{} - logCh, err := client.Sys().Monitor(ctx, "DEBUG", lf) - if err != nil { - t.Fatal(err) - } + timeCh := time.After(5 * time.Second) - type jsonlog struct { - Level string `json:"@level"` - Message string `json:"@message"` - TimeStamp string `json:"@timestamp"` - } - jsonLog := &jsonlog{} - - timeCh := time.After(5 * time.Second) - - for { - select { - case log := <-logCh: - if lf == "json" { - err := json.Unmarshal([]byte(log), jsonLog) - if err != nil { - t.Fatal("Expected JSON log from channel") - } - if strings.Contains(jsonLog.Level, "debug") { - debugCount++ - } - } else if strings.Contains(log, "[DEBUG]") { - debugCount++ - } - if debugCount > 3 { - // If we've seen multiple lines that match what we want, - // it's probably safe to assume streaming is working - return - } - case <-timeCh: - t.Fatal("Failed to get a DEBUG message after 5 seconds") + for { + select { + case log := <-logCh: + if lf == "json" { + err := json.Unmarshal([]byte(log), jsonLog) + if err != nil { + t.Fatal("Expected JSON log from channel") + } + if strings.Contains(jsonLog.Level, "debug") { + debugCount++ } } - }) + if strings.Contains(log, "[DEBUG]") { + debugCount++ + } + case <-timeCh: + t.Fatal("Failed to get a DEBUG message after 5 seconds") + } + + // If we've seen multiple lines that match what we want, + // it's probably safe to assume streaming is working + if debugCount > 3 { + stopCh <- struct{}{} + break + } } + + <-stopCh } diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go index 384f5bf810bf9..420abcc87234e 100644 --- a/http/sys_mount_test.go +++ b/http/sys_mount_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -12,7 +9,6 @@ import ( "github.com/fatih/structs" "github.com/go-test/deep" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/versions" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/vault" @@ -506,7 +502,7 @@ func TestSysRemount(t *testing.T) { // Poll until the remount succeeds var remountResp map[string]interface{} testResponseBody(t, resp, &remountResp) - corehelpers.RetryUntil(t, 5*time.Second, func() error { + vault.RetryUntil(t, 5*time.Second, func() error { resp = testHttpGet(t, token, addr+"/v1/sys/remount/status/"+remountResp["migration_id"].(string)) testResponseStatus(t, resp, 200) diff --git a/http/sys_mounts_test.go b/http/sys_mounts_test.go index 5f2218514ec07..7c113d9879509 100644 --- a/http/sys_mounts_test.go +++ b/http/sys_mounts_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_policy_test.go b/http/sys_policy_test.go index 1ab1e85bb7acc..6844a5321d6a6 100644 --- a/http/sys_policy_test.go +++ b/http/sys_policy_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_raft.go b/http/sys_raft.go index 1e00ebe5d90cd..428aad4f7da3a 100644 --- a/http/sys_raft.go +++ b/http/sys_raft.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_rekey.go b/http/sys_rekey.go index c05dc83976531..d1cec653a6283 100644 --- a/http/sys_rekey.go +++ b/http/sys_rekey.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_rekey_test.go b/http/sys_rekey_test.go index eaef4dd7a1f3b..fd068ba48bda1 100644 --- a/http/sys_rekey_test.go +++ b/http/sys_rekey_test.go @@ -1,57 +1,46 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( "encoding/hex" "encoding/json" "fmt" + "net/http" "reflect" "testing" "github.com/go-test/deep" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/vault" ) // Test to check if the API errors out when wrong number of PGP keys are // supplied for rekey func TestSysRekey_Init_pgpKeysEntriesForRekey(t *testing.T) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: Handler, - RequestResponseCallback: schema.ResponseValidatingCallback(t), - }) - cluster.Start() - defer cluster.Cleanup() - cl := cluster.Cores[0].Client + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) - _, err := cl.Logical().Write("sys/rekey/init", map[string]interface{}{ + resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{ "secret_shares": 5, "secret_threshold": 3, "pgp_keys": []string{"pgpkey1"}, }) - if err == nil { - t.Fatal("should have failed to write pgp key entry due to mismatched keys", err) - } + testResponseStatus(t, resp, 400) } func TestSysRekey_Init_Status(t *testing.T) { t.Run("status-barrier-default", func(t *testing.T) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: Handler, - RequestResponseCallback: schema.ResponseValidatingCallback(t), - }) - cluster.Start() - defer cluster.Cleanup() - cl := cluster.Cores[0].Client + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) - resp, err := cl.Logical().Read("sys/rekey/init") + resp, err := http.Get(addr + 
"/v1/sys/rekey/init") if err != nil { t.Fatalf("err: %s", err) } - actual := resp.Data + var actual map[string]interface{} expected := map[string]interface{}{ "started": false, "t": json.Number("0"), @@ -63,7 +52,8 @@ func TestSysRekey_Init_Status(t *testing.T) { "nonce": "", "verification_required": false, } - + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) if !reflect.DeepEqual(actual, expected) { t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual) } @@ -72,24 +62,19 @@ func TestSysRekey_Init_Status(t *testing.T) { func TestSysRekey_Init_Setup(t *testing.T) { t.Run("init-barrier-barrier-key", func(t *testing.T) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: Handler, - RequestResponseCallback: schema.ResponseValidatingCallback(t), - }) - cluster.Start() - defer cluster.Cleanup() - cl := cluster.Cores[0].Client + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) // Start rekey - resp, err := cl.Logical().Write("sys/rekey/init", map[string]interface{}{ + resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{ "secret_shares": 5, "secret_threshold": 3, }) - if err != nil { - t.Fatalf("err: %s", err) - } + testResponseStatus(t, resp, 200) - actual := resp.Data + var actual map[string]interface{} expected := map[string]interface{}{ "started": true, "t": json.Number("3"), @@ -100,7 +85,8 @@ func TestSysRekey_Init_Setup(t *testing.T) { "backup": false, "verification_required": false, } - + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) if actual["nonce"].(string) == "" { t.Fatalf("nonce was empty") } @@ -110,12 +96,9 @@ func TestSysRekey_Init_Setup(t *testing.T) { } // Get rekey status - resp, err = cl.Logical().Read("sys/rekey/init") - if err != nil { - t.Fatalf("err: %s", err) - } + resp = testHttpGet(t, token, addr+"/v1/sys/rekey/init") - actual = resp.Data + actual = 
map[string]interface{}{} expected = map[string]interface{}{ "started": true, "t": json.Number("3"), @@ -126,6 +109,8 @@ func TestSysRekey_Init_Setup(t *testing.T) { "backup": false, "verification_required": false, } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) if actual["nonce"].(string) == "" { t.Fatalf("nonce was empty") } @@ -141,33 +126,26 @@ func TestSysRekey_Init_Setup(t *testing.T) { func TestSysRekey_Init_Cancel(t *testing.T) { t.Run("cancel-barrier-barrier-key", func(t *testing.T) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: Handler, - RequestResponseCallback: schema.ResponseValidatingCallback(t), - }) - cluster.Start() - defer cluster.Cleanup() - cl := cluster.Cores[0].Client + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) - _, err := cl.Logical().Write("sys/rekey/init", map[string]interface{}{ + resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{ "secret_shares": 5, "secret_threshold": 3, }) - if err != nil { - t.Fatalf("err: %s", err) - } + testResponseStatus(t, resp, 200) - _, err = cl.Logical().Delete("sys/rekey/init") - if err != nil { - t.Fatalf("err: %s", err) - } + resp = testHttpDelete(t, token, addr+"/v1/sys/rekey/init") + testResponseStatus(t, resp, 204) - resp, err := cl.Logical().Read("sys/rekey/init") + resp, err := http.Get(addr + "/v1/sys/rekey/init") if err != nil { t.Fatalf("err: %s", err) } - actual := resp.Data + var actual map[string]interface{} expected := map[string]interface{}{ "started": false, "t": json.Number("0"), @@ -179,6 +157,8 @@ func TestSysRekey_Init_Cancel(t *testing.T) { "nonce": "", "verification_required": false, } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) if !reflect.DeepEqual(actual, expected) { t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual) } diff --git a/http/sys_rotate_test.go 
b/http/sys_rotate_test.go index dfc28a257c559..81597c7008e01 100644 --- a/http/sys_rotate_test.go +++ b/http/sys_rotate_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_seal.go b/http/sys_seal.go index 5d32828e70f9d..24f491b65d1d6 100644 --- a/http/sys_seal.go +++ b/http/sys_seal.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/sys_seal_test.go b/http/sys_seal_test.go index ef5922d52220f..e991f44163c73 100644 --- a/http/sys_seal_test.go +++ b/http/sys_seal_test.go @@ -1,25 +1,18 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( - "context" - "encoding/base64" "encoding/hex" "encoding/json" "fmt" "net/http" "strconv" - "strings" "testing" "github.com/go-test/deep" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/version" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal" - "github.com/hashicorp/vault/version" ) func TestSysSealStatus(t *testing.T) { @@ -162,173 +155,16 @@ func TestSysUnseal(t *testing.T) { } } -func subtestBadSingleKey(t *testing.T, seal vault.Seal) { - core := vault.TestCoreWithSeal(t, seal, false) - _, err := core.Initialize(context.Background(), &vault.InitParams{ - BarrierConfig: &vault.SealConfig{ - SecretShares: 1, - SecretThreshold: 1, - }, - RecoveryConfig: &vault.SealConfig{ - SecretShares: 1, - SecretThreshold: 1, - }, - }) - if err != nil { - t.Fatalf("err: %s", err) - } - +func TestSysUnseal_badKey(t *testing.T) { + core := vault.TestCore(t) + vault.TestCoreInit(t, core) ln, addr := TestServer(t, core) defer ln.Close() - testCases := []struct { - description string - key string - }{ - // hex key tests - // hexadecimal strings have 2 symbols per byte; size(0xAA) == 1 byte - { - "short hex key", - strings.Repeat("AA", 8), - 
}, - { - "long hex key", - strings.Repeat("AA", 34), - }, - { - "uneven hex key byte length", - strings.Repeat("AA", 33), - }, - { - "valid hex key but wrong cluster", - "4482691dd3a710723c4f77c4920ee21b96c226bf4829fa6eb8e8262c180ae933", - }, - - // base64 key tests - // base64 strings have min. 1 character per byte; size("m") == 1 byte - { - "short b64 key", - base64.StdEncoding.EncodeToString([]byte(strings.Repeat("m", 8))), - }, - { - "long b64 key", - base64.StdEncoding.EncodeToString([]byte(strings.Repeat("m", 34))), - }, - { - "uneven b64 key byte length", - base64.StdEncoding.EncodeToString([]byte(strings.Repeat("m", 33))), - }, - { - "valid b64 key but wrong cluster", - "RIJpHdOnEHI8T3fEkg7iG5bCJr9IKfpuuOgmLBgK6TM=", - }, - - // other key tests - { - "empty key", - "", - }, - { - "key with bad format", - "ThisKeyIsNeitherB64NorHex", - }, - } - - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{ - "key": tc.key, - }) - - testResponseStatus(t, resp, 400) - }) - } -} - -func subtestBadMultiKey(t *testing.T, seal vault.Seal) { - numKeys := 3 - - core := vault.TestCoreWithSeal(t, seal, false) - _, err := core.Initialize(context.Background(), &vault.InitParams{ - BarrierConfig: &vault.SealConfig{ - SecretShares: numKeys, - SecretThreshold: numKeys, - }, - RecoveryConfig: &vault.SealConfig{ - SecretShares: numKeys, - SecretThreshold: numKeys, - }, + resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{ + "key": "0123", }) - if err != nil { - t.Fatalf("err: %s", err) - } - ln, addr := TestServer(t, core) - defer ln.Close() - - testCases := []struct { - description string - keys []string - }{ - { - "all unseal keys from another cluster", - []string{ - "b189d98fdec3a15bed9b1cce5088f82b92896696b788c07bdf03c73da08279a5e8", - "0fa98232f034177d8d9c2824899a2ac1e55dc6799348533e10510b856aef99f61a", - 
"5344f5caa852f9ba1967d9623ed286a45ea7c4a529522d25f05d29ff44f17930ac", - }, - }, - { - "mixing unseal keys from different cluster, different share config", - []string{ - "b189d98fdec3a15bed9b1cce5088f82b92896696b788c07bdf03c73da08279a5e8", - "0fa98232f034177d8d9c2824899a2ac1e55dc6799348533e10510b856aef99f61a", - "e04ea3020838c2050c4a169d7ba4d30e034eec8e83e8bed9461bf2646ee412c0", - }, - }, - { - "mixing unseal keys from different clusters, similar share config", - []string{ - "b189d98fdec3a15bed9b1cce5088f82b92896696b788c07bdf03c73da08279a5e8", - "0fa98232f034177d8d9c2824899a2ac1e55dc6799348533e10510b856aef99f61a", - "413f80521b393aa6c4e42e9a3a3ab7f00c2002b2c3bf1e273fc6f363f35f2a378b", - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - for i, key := range tc.keys { - resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{ - "key": key, - }) - - if i == numKeys-1 { - // last key - testResponseStatus(t, resp, 400) - } else { - // unseal in progress - testResponseStatus(t, resp, 200) - } - - } - }) - } -} - -func TestSysUnseal_BadKeyNewShamir(t *testing.T) { - seal := vault.NewTestSeal(t, - &seal.TestSealOpts{StoredKeys: seal.StoredKeysSupportedShamirRoot}) - - subtestBadSingleKey(t, seal) - subtestBadMultiKey(t, seal) -} - -func TestSysUnseal_BadKeyAutoUnseal(t *testing.T) { - seal := vault.NewTestSeal(t, - &seal.TestSealOpts{StoredKeys: seal.StoredKeysSupportedGeneric}) - - subtestBadSingleKey(t, seal) - subtestBadMultiKey(t, seal) + testResponseStatus(t, resp, 400) } func TestSysUnseal_Reset(t *testing.T) { diff --git a/http/sys_wrapping_test.go b/http/sys_wrapping_test.go index c991bd2304800..17520e78cf324 100644 --- a/http/sys_wrapping_test.go +++ b/http/sys_wrapping_test.go @@ -1,11 +1,7 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( "encoding/json" - "errors" "reflect" "testing" "time" @@ -370,20 +366,4 @@ func TestHTTP_Wrapping(t *testing.T) { }) { t.Fatalf("secret data did not match expected: %#v", secret.Data) } - - // Ensure that wrapping lookup without a client token responds correctly - client.ClearToken() - secret, err = client.Logical().Read("sys/wrapping/lookup") - if secret != nil { - t.Fatalf("expected no response: %#v", secret) - } - - if err == nil { - t.Fatal("expected error") - } - - var respError *api.ResponseError - if errors.As(err, &respError); respError.StatusCode != 403 { - t.Fatalf("expected 403 response, actual: %d", respError.StatusCode) - } } diff --git a/http/testing.go b/http/testing.go index 95153991e7667..0d2c58a619885 100644 --- a/http/testing.go +++ b/http/testing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( @@ -34,7 +31,7 @@ func TestServerWithListenerAndProperties(tb testing.TB, ln net.Listener, addr st // for tests. mux := http.NewServeMux() mux.Handle("/_test/auth", http.HandlerFunc(testHandleAuth)) - mux.Handle("/", Handler.Handler(props)) + mux.Handle("/", Handler(props)) server := &http.Server{ Addr: ln.Addr().String(), diff --git a/http/unwrapping_raw_body_test.go b/http/unwrapping_raw_body_test.go index e1ad0df9c297a..6ba24b7c9098d 100644 --- a/http/unwrapping_raw_body_test.go +++ b/http/unwrapping_raw_body_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/http/util.go b/http/util.go index f714efa54f3bb..b4c8923cc3eea 100644 --- a/http/util.go +++ b/http/util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package http import ( diff --git a/internal/go118_sha1_patch.go b/internal/go118_sha1_patch.go index fc2ccf238266e..f3b3cea68847d 100644 --- a/internal/go118_sha1_patch.go +++ b/internal/go118_sha1_patch.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package internal import ( @@ -10,7 +7,7 @@ import ( _ "unsafe" // for go:linkname goversion "github.com/hashicorp/go-version" - "github.com/hashicorp/vault/version" + "github.com/hashicorp/vault/sdk/version" ) const sha1PatchVersionsBefore = "1.12.0" diff --git a/internalshared/configutil/config.go b/internalshared/configutil/config.go index 99777229f0d49..5c12e03b949f1 100644 --- a/internalshared/configutil/config.go +++ b/internalshared/configutil/config.go @@ -1,10 +1,8 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package configutil import ( "fmt" + "io/ioutil" "time" "github.com/hashicorp/go-secure-stdlib/parseutil" @@ -23,8 +21,6 @@ type SharedConfig struct { Listeners []*Listener `hcl:"-"` - UserLockouts []*UserLockout `hcl:"-"` - Seals []*KMS `hcl:"-"` Entropy *Entropy `hcl:"-"` @@ -41,20 +37,33 @@ type SharedConfig struct { // LogFormat specifies the log format. Valid values are "standard" and // "json". The values are case-insenstive. If no log format is specified, // then standard format will be used. - LogFormat string `hcl:"log_format"` - LogLevel string `hcl:"log_level"` - LogFile string `hcl:"log_file"` - LogRotateDuration string `hcl:"log_rotate_duration"` - LogRotateBytes int `hcl:"log_rotate_bytes"` - LogRotateBytesRaw interface{} `hcl:"log_rotate_bytes"` - LogRotateMaxFiles int `hcl:"log_rotate_max_files"` - LogRotateMaxFilesRaw interface{} `hcl:"log_rotate_max_files"` + LogFormat string `hcl:"log_format"` + LogLevel string `hcl:"log_level"` PidFile string `hcl:"pid_file"` ClusterName string `hcl:"cluster_name"` } +// LoadConfigFile loads the configuration from the given file. 
+func LoadConfigFile(path string) (*SharedConfig, error) { + // Read the file + d, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + return ParseConfig(string(d)) +} + +func LoadConfigKMSes(path string) ([]*KMS, error) { + // Read the file + d, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + return ParseKMSes(string(d)) +} + func ParseConfig(d string) (*SharedConfig, error) { // Parse! obj, err := hcl.Parse(d) @@ -125,13 +134,6 @@ func ParseConfig(d string) (*SharedConfig, error) { } } - if o := list.Filter("user_lockout"); len(o.Items) > 0 { - result.found("user_lockout", "UserLockout") - if err := ParseUserLockouts(&result, o); err != nil { - return nil, fmt.Errorf("error parsing 'user_lockout': %w", err) - } - } - if o := list.Filter("telemetry"); len(o.Items) > 0 { result.found("telemetry", "Telemetry") if err := parseTelemetry(&result, o); err != nil { @@ -192,22 +194,6 @@ func (c *SharedConfig) Sanitized() map[string]interface{} { result["listeners"] = sanitizedListeners } - // Sanitize user lockout stanza - if len(c.UserLockouts) != 0 { - var sanitizedUserLockouts []interface{} - for _, userlockout := range c.UserLockouts { - cleanUserLockout := map[string]interface{}{ - "type": userlockout.Type, - "lockout_threshold": userlockout.LockoutThreshold, - "lockout_duration": userlockout.LockoutDuration, - "lockout_counter_reset": userlockout.LockoutCounterReset, - "disable_lockout": userlockout.DisableLockout, - } - sanitizedUserLockouts = append(sanitizedUserLockouts, cleanUserLockout) - } - result["user_lockout_configs"] = sanitizedUserLockouts - } - // Sanitize seals stanza if len(c.Seals) != 0 { var sanitizedSeals []interface{} diff --git a/internalshared/configutil/config_util.go b/internalshared/configutil/config_util.go index 3fd4bb9487130..05cb061e7e9b6 100644 --- a/internalshared/configutil/config_util.go +++ b/internalshared/configutil/config_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - //go:build !enterprise package configutil diff --git a/internalshared/configutil/encrypt_decrypt.go b/internalshared/configutil/encrypt_decrypt.go index f0e5fcc04270b..1e9f830901c44 100644 --- a/internalshared/configutil/encrypt_decrypt.go +++ b/internalshared/configutil/encrypt_decrypt.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package configutil import ( diff --git a/internalshared/configutil/encrypt_decrypt_test.go b/internalshared/configutil/encrypt_decrypt_test.go index 19bf6858338b8..b9257bb6c2b2d 100644 --- a/internalshared/configutil/encrypt_decrypt_test.go +++ b/internalshared/configutil/encrypt_decrypt_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package configutil import ( diff --git a/internalshared/configutil/hcp_link.go b/internalshared/configutil/hcp_link.go index fd8d6b6ca8539..a46c3bb1f5530 100644 --- a/internalshared/configutil/hcp_link.go +++ b/internalshared/configutil/hcp_link.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package configutil import ( diff --git a/internalshared/configutil/http_response_headers.go b/internalshared/configutil/http_response_headers.go index b808f9e6522c9..2db3034e588b6 100644 --- a/internalshared/configutil/http_response_headers.go +++ b/internalshared/configutil/http_response_headers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package configutil import ( diff --git a/internalshared/configutil/kms.go b/internalshared/configutil/kms.go index 025018124984d..614a6ec8e5712 100644 --- a/internalshared/configutil/kms.go +++ b/internalshared/configutil/kms.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package configutil import ( diff --git a/internalshared/configutil/lint.go b/internalshared/configutil/lint.go index 24b968e6b96b0..2b5b634156b3c 100644 --- a/internalshared/configutil/lint.go +++ b/internalshared/configutil/lint.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package configutil import ( diff --git a/internalshared/configutil/listener.go b/internalshared/configutil/listener.go index 5e9373a169c77..3f3c5993025c8 100644 --- a/internalshared/configutil/listener.go +++ b/internalshared/configutil/listener.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package configutil import ( @@ -47,7 +44,6 @@ type Listener struct { Type string Purpose []string `hcl:"-"` PurposeRaw interface{} `hcl:"purpose"` - Role string `hcl:"role"` Address string `hcl:"address"` ClusterAddress string `hcl:"cluster_address"` @@ -100,8 +96,6 @@ type Listener struct { AgentAPI *AgentAPI `hcl:"agent_api"` - ProxyAPI *ProxyAPI `hcl:"proxy_api"` - Telemetry ListenerTelemetry `hcl:"telemetry"` Profiling ListenerProfiling `hcl:"profiling"` InFlightRequestLogging ListenerInFlightRequestLogging `hcl:"inflight_requests_logging"` @@ -125,11 +119,6 @@ type AgentAPI struct { EnableQuit bool `hcl:"enable_quit"` } -// ProxyAPI allows users to select which parts of the Vault Proxy API they want enabled. 
-type ProxyAPI struct { - EnableQuit bool `hcl:"enable_quit"` -} - func (l *Listener) GoString() string { return fmt.Sprintf("*%#v", *l) } @@ -193,13 +182,6 @@ func ParseListeners(result *SharedConfig, list *ast.ObjectList) error { l.PurposeRaw = nil } - - switch l.Role { - case "default", "metrics_only", "": - result.found(l.Type, l.Type) - default: - return multierror.Prefix(fmt.Errorf("unsupported listener role %q", l.Role), fmt.Sprintf("listeners.%d:", i)) - } } // Request Parameters diff --git a/internalshared/configutil/listener_test.go b/internalshared/configutil/listener_test.go index da7d76596b6fe..803086e483e23 100644 --- a/internalshared/configutil/listener_test.go +++ b/internalshared/configutil/listener_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package configutil import ( diff --git a/internalshared/configutil/merge.go b/internalshared/configutil/merge.go index 940e8bfcfb2cc..8ae99ca4879d7 100644 --- a/internalshared/configutil/merge.go +++ b/internalshared/configutil/merge.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package configutil func (c *SharedConfig) Merge(c2 *SharedConfig) *SharedConfig { @@ -17,13 +14,6 @@ func (c *SharedConfig) Merge(c2 *SharedConfig) *SharedConfig { result.Listeners = append(result.Listeners, l) } - for _, userlockout := range c.UserLockouts { - result.UserLockouts = append(result.UserLockouts, userlockout) - } - for _, userlockout := range c2.UserLockouts { - result.UserLockouts = append(result.UserLockouts, userlockout) - } - result.HCPLinkConf = c.HCPLinkConf if c2.HCPLinkConf != nil { result.HCPLinkConf = c2.HCPLinkConf @@ -66,28 +56,6 @@ func (c *SharedConfig) Merge(c2 *SharedConfig) *SharedConfig { result.LogFormat = c2.LogFormat } - result.LogFile = c.LogFile - if c2.LogFile != "" { - result.LogFile = c2.LogFile - } - - result.LogRotateBytes = c.LogRotateBytes - if c2.LogRotateBytesRaw != nil { - result.LogRotateBytes = c2.LogRotateBytes - result.LogRotateBytesRaw = c2.LogRotateBytesRaw - } - - result.LogRotateMaxFiles = c.LogRotateMaxFiles - if c2.LogRotateMaxFilesRaw != nil { - result.LogRotateMaxFiles = c2.LogRotateMaxFiles - result.LogRotateMaxFilesRaw = c2.LogRotateMaxFilesRaw - } - - result.LogRotateDuration = c.LogRotateDuration - if c2.LogRotateDuration != "" { - result.LogRotateDuration = c2.LogRotateDuration - } - result.PidFile = c.PidFile if c2.PidFile != "" { result.PidFile = c2.PidFile diff --git a/internalshared/configutil/telemetry.go b/internalshared/configutil/telemetry.go index 270eb493d5ee7..77620770db3e6 100644 --- a/internalshared/configutil/telemetry.go +++ b/internalshared/configutil/telemetry.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package configutil import ( diff --git a/internalshared/configutil/telemetry_test.go b/internalshared/configutil/telemetry_test.go index aaeb808171ab7..dda74711dcb55 100644 --- a/internalshared/configutil/telemetry_test.go +++ b/internalshared/configutil/telemetry_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package configutil import ( diff --git a/internalshared/configutil/userlockout.go b/internalshared/configutil/userlockout.go deleted file mode 100644 index df76308ddb5ab..0000000000000 --- a/internalshared/configutil/userlockout.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package configutil - -import ( - "errors" - "fmt" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/ast" -) - -const ( - UserLockoutThresholdDefault = 5 - UserLockoutDurationDefault = 15 * time.Minute - UserLockoutCounterResetDefault = 15 * time.Minute - DisableUserLockoutDefault = false -) - -type UserLockout struct { - Type string - LockoutThreshold uint64 `hcl:"-"` - LockoutThresholdRaw interface{} `hcl:"lockout_threshold"` - LockoutDuration time.Duration `hcl:"-"` - LockoutDurationRaw interface{} `hcl:"lockout_duration"` - LockoutCounterReset time.Duration `hcl:"-"` - LockoutCounterResetRaw interface{} `hcl:"lockout_counter_reset"` - DisableLockout bool `hcl:"-"` - DisableLockoutRaw interface{} `hcl:"disable_lockout"` -} - -func GetSupportedUserLockoutsAuthMethods() []string { - return []string{"userpass", "approle", "ldap"} -} - -func ParseUserLockouts(result *SharedConfig, list *ast.ObjectList) error { - var err error - result.UserLockouts = make([]*UserLockout, 0, len(list.Items)) - userLockoutsMap := make(map[string]*UserLockout) - for i, item := range list.Items { - var userLockoutConfig 
UserLockout - if err := hcl.DecodeObject(&userLockoutConfig, item.Val); err != nil { - return multierror.Prefix(err, fmt.Sprintf("userLockouts.%d:", i)) - } - - // Base values - { - switch { - case userLockoutConfig.Type != "": - case len(item.Keys) == 1: - userLockoutConfig.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) - default: - return multierror.Prefix(errors.New("auth type for user lockout must be specified, if it applies to all auth methods specify \"all\" "), fmt.Sprintf("user_lockouts.%d:", i)) - } - - userLockoutConfig.Type = strings.ToLower(userLockoutConfig.Type) - // Supported auth methods for user lockout configuration: ldap, approle, userpass - // "all" is used to apply the configuration to all supported auth methods - switch userLockoutConfig.Type { - case "all", "ldap", "approle", "userpass": - result.found(userLockoutConfig.Type, userLockoutConfig.Type) - default: - return multierror.Prefix(fmt.Errorf("unsupported auth type %q", userLockoutConfig.Type), fmt.Sprintf("user_lockouts.%d:", i)) - } - } - - // Lockout Parameters - - // Not setting raw entries to nil here as soon as they are parsed - // as they are used to set the missing user lockout configuration values later. 
- { - if userLockoutConfig.LockoutThresholdRaw != nil { - userLockoutThresholdString := fmt.Sprintf("%v", userLockoutConfig.LockoutThresholdRaw) - if userLockoutConfig.LockoutThreshold, err = strconv.ParseUint(userLockoutThresholdString, 10, 64); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing lockout_threshold: %w", err), fmt.Sprintf("user_lockouts.%d", i)) - } - } - - if userLockoutConfig.LockoutDurationRaw != nil { - if userLockoutConfig.LockoutDuration, err = parseutil.ParseDurationSecond(userLockoutConfig.LockoutDurationRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing lockout_duration: %w", err), fmt.Sprintf("user_lockouts.%d", i)) - } - if userLockoutConfig.LockoutDuration < 0 { - return multierror.Prefix(errors.New("lockout_duration cannot be negative"), fmt.Sprintf("user_lockouts.%d", i)) - } - - } - - if userLockoutConfig.LockoutCounterResetRaw != nil { - if userLockoutConfig.LockoutCounterReset, err = parseutil.ParseDurationSecond(userLockoutConfig.LockoutCounterResetRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing lockout_counter_reset: %w", err), fmt.Sprintf("user_lockouts.%d", i)) - } - if userLockoutConfig.LockoutCounterReset < 0 { - return multierror.Prefix(errors.New("lockout_counter_reset cannot be negative"), fmt.Sprintf("user_lockouts.%d", i)) - } - - } - if userLockoutConfig.DisableLockoutRaw != nil { - if userLockoutConfig.DisableLockout, err = parseutil.ParseBool(userLockoutConfig.DisableLockoutRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for disable_lockout: %w", err), fmt.Sprintf("user_lockouts.%d", i)) - } - } - } - userLockoutsMap[userLockoutConfig.Type] = &userLockoutConfig - } - - // Use raw entries to set values for user lockout configurations fields - // that were not configured using config file. - // The raw entries would mean that the entry was configured by the user using the config file. 
- // If any of these fields are not configured using the config file (missing fields), - // we set values for these fields with defaults - // The issue with not being able to use non-raw entries is because of fields lockout threshold - // and disable lockout. We cannot differentiate using non-raw entries if the user configured these fields - // with values (0 and false) or if the the user did not configure these values in config file at all. - // The raw fields are set to nil after setting missing values in setNilValuesForRawUserLockoutFields function - userLockoutsMap = setMissingUserLockoutValuesInMap(userLockoutsMap) - for _, userLockoutValues := range userLockoutsMap { - result.UserLockouts = append(result.UserLockouts, userLockoutValues) - } - return nil -} - -// setUserLockoutValueAllInMap sets default user lockout values for key "all" (all auth methods) -// for user lockout fields that are not configured using config file -func setUserLockoutValueAllInMap(userLockoutAll *UserLockout) *UserLockout { - if userLockoutAll.Type == "" { - userLockoutAll.Type = "all" - } - if userLockoutAll.LockoutThresholdRaw == nil { - userLockoutAll.LockoutThreshold = UserLockoutThresholdDefault - } - if userLockoutAll.LockoutDurationRaw == nil { - userLockoutAll.LockoutDuration = UserLockoutDurationDefault - } - if userLockoutAll.LockoutCounterResetRaw == nil { - userLockoutAll.LockoutCounterReset = UserLockoutCounterResetDefault - } - if userLockoutAll.DisableLockoutRaw == nil { - userLockoutAll.DisableLockout = DisableUserLockoutDefault - } - return setNilValuesForRawUserLockoutFields(userLockoutAll) -} - -// setDefaultUserLockoutValuesInMap sets missing user lockout fields for auth methods -// with default values (from key "all") that are not configured using config file -func setMissingUserLockoutValuesInMap(userLockoutsMap map[string]*UserLockout) map[string]*UserLockout { - // set values for "all" key with default values for "all" user lockout fields that are not 
configured - // the "all" key values will be used as default values for other auth methods - userLockoutAll, ok := userLockoutsMap["all"] - switch ok { - case true: - userLockoutsMap["all"] = setUserLockoutValueAllInMap(userLockoutAll) - default: - userLockoutsMap["all"] = setUserLockoutValueAllInMap(&UserLockout{}) - } - - for _, userLockoutAuth := range userLockoutsMap { - if userLockoutAuth.Type == "all" { - continue - } - // set missing values - if userLockoutAuth.LockoutThresholdRaw == nil { - userLockoutAuth.LockoutThreshold = userLockoutsMap["all"].LockoutThreshold - } - if userLockoutAuth.LockoutDurationRaw == nil { - userLockoutAuth.LockoutDuration = userLockoutsMap["all"].LockoutDuration - } - if userLockoutAuth.LockoutCounterResetRaw == nil { - userLockoutAuth.LockoutCounterReset = userLockoutsMap["all"].LockoutCounterReset - } - if userLockoutAuth.DisableLockoutRaw == nil { - userLockoutAuth.DisableLockout = userLockoutsMap["all"].DisableLockout - } - userLockoutAuth = setNilValuesForRawUserLockoutFields(userLockoutAuth) - userLockoutsMap[userLockoutAuth.Type] = userLockoutAuth - } - return userLockoutsMap -} - -// setNilValuesForRawUserLockoutFields sets nil values for user lockout Raw fields -func setNilValuesForRawUserLockoutFields(userLockout *UserLockout) *UserLockout { - userLockout.LockoutThresholdRaw = nil - userLockout.LockoutDurationRaw = nil - userLockout.LockoutCounterResetRaw = nil - userLockout.DisableLockoutRaw = nil - return userLockout -} diff --git a/internalshared/configutil/userlockout_test.go b/internalshared/configutil/userlockout_test.go deleted file mode 100644 index db05441c684d4..0000000000000 --- a/internalshared/configutil/userlockout_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package configutil - -import ( - "reflect" - "testing" - "time" -) - -func TestParseUserLockout(t *testing.T) { - t.Parallel() - t.Run("Missing user lockout block in config file", func(t *testing.T) { - t.Parallel() - inputConfig := make(map[string]*UserLockout) - expectedConfig := make(map[string]*UserLockout) - expectedConfigall := &UserLockout{} - expectedConfigall.Type = "all" - expectedConfigall.LockoutThreshold = UserLockoutThresholdDefault - expectedConfigall.LockoutDuration = UserLockoutDurationDefault - expectedConfigall.LockoutCounterReset = UserLockoutCounterResetDefault - expectedConfigall.DisableLockout = DisableUserLockoutDefault - expectedConfig["all"] = expectedConfigall - - outputConfig := setMissingUserLockoutValuesInMap(inputConfig) - if !reflect.DeepEqual(expectedConfig["all"], outputConfig["all"]) { - t.Errorf("user lockout config: expected %#v\nactual %#v", expectedConfig["all"], outputConfig["all"]) - } - }) - t.Run("setting default lockout counter reset and lockout duration for userpass in config ", func(t *testing.T) { - t.Parallel() - // input user lockout in config file - inputConfig := make(map[string]*UserLockout) - configAll := &UserLockout{} - configAll.Type = "all" - configAll.LockoutCounterReset = 20 * time.Minute - configAll.LockoutCounterResetRaw = "1200000000000" - inputConfig["all"] = configAll - configUserpass := &UserLockout{} - configUserpass.Type = "userpass" - configUserpass.LockoutDuration = 10 * time.Minute - configUserpass.LockoutDurationRaw = "600000000000" - inputConfig["userpass"] = configUserpass - - expectedConfig := make(map[string]*UserLockout) - expectedConfigall := &UserLockout{} - expectedConfigUserpass := &UserLockout{} - // expected default values - expectedConfigall.Type = "all" - expectedConfigall.LockoutThreshold = UserLockoutThresholdDefault - expectedConfigall.LockoutDuration = UserLockoutDurationDefault - expectedConfigall.LockoutCounterReset = 20 * time.Minute - 
expectedConfigall.DisableLockout = DisableUserLockoutDefault - // expected values for userpass - expectedConfigUserpass.Type = "userpass" - expectedConfigUserpass.LockoutThreshold = UserLockoutThresholdDefault - expectedConfigUserpass.LockoutDuration = 10 * time.Minute - expectedConfigUserpass.LockoutCounterReset = 20 * time.Minute - expectedConfigUserpass.DisableLockout = DisableUserLockoutDefault - expectedConfig["all"] = expectedConfigall - expectedConfig["userpass"] = expectedConfigUserpass - - outputConfig := setMissingUserLockoutValuesInMap(inputConfig) - if !reflect.DeepEqual(expectedConfig["all"], outputConfig["all"]) { - t.Errorf("user lockout config: expected %#v\nactual %#v", expectedConfig["all"], outputConfig["all"]) - } - if !reflect.DeepEqual(expectedConfig["userpass"], outputConfig["userpass"]) { - t.Errorf("user lockout config: expected %#v\nactual %#v", expectedConfig["userpass"], outputConfig["userpass"]) - } - }) -} diff --git a/internalshared/listenerutil/bufconn.go b/internalshared/listenerutil/bufconn.go index 54af0a783e20d..d3d9d653c576d 100644 --- a/internalshared/listenerutil/bufconn.go +++ b/internalshared/listenerutil/bufconn.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package listenerutil import ( diff --git a/internalshared/listenerutil/listener.go b/internalshared/listenerutil/listener.go index 9a4edb45dcdce..6095713be5d24 100644 --- a/internalshared/listenerutil/listener.go +++ b/internalshared/listenerutil/listener.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package listenerutil import ( diff --git a/internalshared/listenerutil/listener_test.go b/internalshared/listenerutil/listener_test.go index 6219727e19075..3c2afa593aed5 100644 --- a/internalshared/listenerutil/listener_test.go +++ b/internalshared/listenerutil/listener_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package listenerutil import ( diff --git a/main.go b/main.go index 0417bd98773be..bc8a8651f8f9c 100644 --- a/main.go +++ b/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main // import "github.com/hashicorp/vault" import ( diff --git a/main_test.go b/main_test.go index 36398339dfcf2..4c4c79a2cb8e2 100644 --- a/main_test.go +++ b/main_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main // import "github.com/hashicorp/vault" // This file is intentionally empty to force early versions of Go diff --git a/physical/aerospike/aerospike.go b/physical/aerospike/aerospike.go index 81aab224ac24e..b323ccd344d40 100644 --- a/physical/aerospike/aerospike.go +++ b/physical/aerospike/aerospike.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package aerospike import ( diff --git a/physical/aerospike/aerospike_test.go b/physical/aerospike/aerospike_test.go index 6324b5e6d28b8..653f00ff6c5c4 100644 --- a/physical/aerospike/aerospike_test.go +++ b/physical/aerospike/aerospike_test.go @@ -1,27 +1,18 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package aerospike import ( "context" - "math/bits" - "runtime" - "strings" "testing" "time" aero "github.com/aerospike/aerospike-client-go/v5" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" ) func TestAerospikeBackend(t *testing.T) { - if bits.UintSize == 32 { - t.Skip("Aerospike storage is only supported on 64-bit architectures") - } cleanup, config := prepareAerospikeContainer(t) defer cleanup() @@ -49,13 +40,8 @@ type aerospikeConfig struct { } func prepareAerospikeContainer(t *testing.T) (func(), *aerospikeConfig) { - // Skipping on ARM, as this image can't run on ARM architecture - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as this image is not supported on ARM architectures") - } - runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "docker.mirror.hashicorp.services/aerospike/aerospike-server", + ImageRepo: "aerospike/aerospike-server", ContainerName: "aerospikedb", ImageTag: "5.6.0.5", Ports: []string{"3000/tcp", "3001/tcp", "3002/tcp", "3003/tcp"}, diff --git a/physical/alicloudoss/alicloudoss.go b/physical/alicloudoss/alicloudoss.go index d32d14b3b00ac..40f3da6d5643c 100644 --- a/physical/alicloudoss/alicloudoss.go +++ b/physical/alicloudoss/alicloudoss.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package alicloudoss import ( diff --git a/physical/alicloudoss/alicloudoss_test.go b/physical/alicloudoss/alicloudoss_test.go index 1b098bd344051..ad292da4f6b7b 100644 --- a/physical/alicloudoss/alicloudoss_test.go +++ b/physical/alicloudoss/alicloudoss_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package alicloudoss import ( diff --git a/physical/azure/azure.go b/physical/azure/azure.go index fe884491dbf5f..eb158a993d8a2 100644 --- a/physical/azure/azure.go +++ b/physical/azure/azure.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package azure import ( diff --git a/physical/azure/azure_test.go b/physical/azure/azure_test.go index a004c83357637..20392a21c688b 100644 --- a/physical/azure/azure_test.go +++ b/physical/azure/azure_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package azure import ( diff --git a/physical/cassandra/cassandra.go b/physical/cassandra/cassandra.go index fc9261a550276..84c2ab149db23 100644 --- a/physical/cassandra/cassandra.go +++ b/physical/cassandra/cassandra.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cassandra import ( diff --git a/physical/cassandra/cassandra_test.go b/physical/cassandra/cassandra_test.go index 9466d0a708452..e9fe7bc059a5b 100644 --- a/physical/cassandra/cassandra_test.go +++ b/physical/cassandra/cassandra_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cassandra import ( diff --git a/physical/cockroachdb/cockroachdb.go b/physical/cockroachdb/cockroachdb.go index 38f935cdebd5c..385074d917cbd 100644 --- a/physical/cockroachdb/cockroachdb.go +++ b/physical/cockroachdb/cockroachdb.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cockroachdb import ( diff --git a/physical/cockroachdb/cockroachdb_ha.go b/physical/cockroachdb/cockroachdb_ha.go index 39b617546a87d..1f22465d0810d 100644 --- a/physical/cockroachdb/cockroachdb_ha.go +++ b/physical/cockroachdb/cockroachdb_ha.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cockroachdb import ( diff --git a/physical/cockroachdb/cockroachdb_test.go b/physical/cockroachdb/cockroachdb_test.go index ab652554ae380..69a3c565323f3 100644 --- a/physical/cockroachdb/cockroachdb_test.go +++ b/physical/cockroachdb/cockroachdb_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cockroachdb import ( @@ -9,12 +6,10 @@ import ( "fmt" "net/url" "os" - "runtime" - "strings" "testing" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" ) @@ -28,11 +23,6 @@ type Config struct { var _ docker.ServiceConfig = &Config{} func prepareCockroachDBTestContainer(t *testing.T) (func(), *Config) { - // Skipping, as this image can't run on arm architecture - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as CockroachDB 1.0 is not supported on ARM architectures") - } - if retURL := os.Getenv("CR_URL"); retURL != "" { s, err := docker.NewServiceURLParse(retURL) if err != nil { @@ -46,7 +36,7 @@ func prepareCockroachDBTestContainer(t *testing.T) (func(), *Config) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "docker.mirror.hashicorp.services/cockroachdb/cockroach", + ImageRepo: "cockroachdb/cockroach", ImageTag: "release-1.0", ContainerName: "cockroachdb", Cmd: []string{"start", "--insecure"}, diff --git a/physical/cockroachdb/keywords.go b/physical/cockroachdb/keywords.go index f44089f9f76f7..390dc63f8dfca 100644 --- a/physical/cockroachdb/keywords.go +++ b/physical/cockroachdb/keywords.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cockroachdb // sqlKeywords is a reference of all of the keywords that we do not allow for use as the table name diff --git a/physical/consul/consul.go b/physical/consul/consul.go index b17dbc4c16930..f30403468c6bb 100644 --- a/physical/consul/consul.go +++ b/physical/consul/consul.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( @@ -10,7 +7,6 @@ import ( "net/http" "strconv" "strings" - "sync/atomic" "time" "github.com/armon/go-metrics" @@ -33,9 +29,6 @@ const ( // consistencyModeStrong is the configuration value used to tell // consul to use strong consistency. consistencyModeStrong = "strong" - - // nonExistentKey is used as part of a capabilities check against Consul - nonExistentKey = "F35C28E1-7035-40BB-B865-6BED9E3A1B28" ) // Verify ConsulBackend satisfies the correct interfaces @@ -44,14 +37,11 @@ var ( _ physical.HABackend = (*ConsulBackend)(nil) _ physical.Lock = (*ConsulLock)(nil) _ physical.Transactional = (*ConsulBackend)(nil) - - GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in consul backend") ) // ConsulBackend is a physical backend that stores data at specific // prefix within Consul. It is used for most production situations as // it allows Vault to run on multiple machines in a highly-available manner. -// failGetInTxn is only used in tests. 
type ConsulBackend struct { client *api.Client path string @@ -61,7 +51,6 @@ type ConsulBackend struct { consistencyMode string sessionTTL string lockWaitTime time.Duration - failGetInTxn *uint32 } // NewConsulBackend constructs a Consul backend using the given API client @@ -158,9 +147,9 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe txn: client.Txn(), permitPool: physical.NewPermitPool(maxParInt), consistencyMode: consistencyMode, - sessionTTL: sessionTTL, - lockWaitTime: lockWaitTime, - failGetInTxn: new(uint32), + + sessionTTL: sessionTTL, + lockWaitTime: lockWaitTime, } return c, nil @@ -235,33 +224,6 @@ func SetupSecureTLS(ctx context.Context, consulConf *api.Config, conf map[string return nil } -// ExpandedCapabilitiesAvailable tests to see if Consul has KVGetOrEmpty and 128 entries per transaction available -func (c *ConsulBackend) ExpandedCapabilitiesAvailable(ctx context.Context) bool { - available := false - - maxEntries := 128 - ops := make([]*api.TxnOp, maxEntries) - for i := 0; i < maxEntries; i++ { - ops[i] = &api.TxnOp{KV: &api.KVTxnOp{ - Key: c.path + nonExistentKey, - Verb: api.KVGetOrEmpty, - }} - } - - c.permitPool.Acquire() - defer c.permitPool.Release() - - queryOpts := &api.QueryOptions{} - queryOpts = queryOpts.WithContext(ctx) - - ok, resp, _, err := c.txn.Txn(ops, queryOpts) - if ok && len(resp.Errors) == 0 && err == nil { - available = true - } - - return available -} - // Transaction is used to run multiple entries via a transaction. 
func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { if len(txns) == 0 { @@ -269,13 +231,6 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt } defer metrics.MeasureSince([]string{"consul", "transaction"}, time.Now()) - failGetInTxn := atomic.LoadUint32(c.failGetInTxn) - for _, t := range txns { - if t.Operation == physical.GetOperation && failGetInTxn != 0 { - return GetInTxnDisabledError - } - } - ops := make([]*api.TxnOp, 0, len(txns)) for _, t := range txns { o, err := c.makeApiTxn(t) @@ -346,7 +301,8 @@ func (c *ConsulBackend) makeApiTxn(txn *physical.TxnEntry) (*api.TxnOp, error) { } switch txn.Operation { case physical.GetOperation: - op.Verb = api.KVGetOrEmpty + // TODO: This is currently broken. Once Consul releases 1.14, this should be updated to use api.KVGetOrEmpty + op.Verb = api.KVGet case physical.DeleteOperation: op.Verb = api.KVDelete case physical.PutOperation: @@ -453,14 +409,6 @@ func (c *ConsulBackend) List(ctx context.Context, prefix string) ([]string, erro return out, err } -func (c *ConsulBackend) FailGetInTxn(fail bool) { - var val uint32 - if fail { - val = 1 - } - atomic.StoreUint32(c.failGetInTxn, val) -} - // LockWith is used for mutual exclusion based on the given key. func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) { // Create the lock diff --git a/physical/consul/consul_test.go b/physical/consul/consul_test.go index b0a16ce85a37f..b2a06e612520b 100644 --- a/physical/consul/consul_test.go +++ b/physical/consul/consul_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package consul import ( @@ -254,41 +251,11 @@ func TestConsul_TooLarge(t *testing.T) { } } -func TestConsul_ExpandedCapabilitiesAvailable(t *testing.T) { - testCases := map[string]bool{ - "1.13.5": false, - "1.14.3": true, - } - - for version, shouldBeAvailable := range testCases { - t.Run(version, func(t *testing.T) { - cleanup, config := consul.PrepareTestContainer(t, version, false, true) - defer cleanup() - - logger := logging.NewVaultLogger(log.Debug) - backendConfig := map[string]string{ - "address": config.Address(), - "token": config.Token, - "path": "vault/", - "max_parallel": "-1", - } - - be, err := NewConsulBackend(backendConfig, logger) - if err != nil { - t.Fatal(err) - } - b := be.(*ConsulBackend) - - isAvailable := b.ExpandedCapabilitiesAvailable(context.Background()) - if isAvailable != shouldBeAvailable { - t.Errorf("%t != %t, version %s\n", isAvailable, shouldBeAvailable, version) - } - }) - } -} - func TestConsul_TransactionalBackend_GetTransactionsForNonExistentValues(t *testing.T) { - cleanup, config := consul.PrepareTestContainer(t, "1.14.2", false, true) + // TODO: unskip this after Consul releases 1.14 and we update our API dep. It currently fails but should pass with Consul 1.14 + t.SkipNow() + + cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) defer cleanup() client, err := api.NewClient(config.APIConfig()) @@ -349,7 +316,10 @@ func TestConsul_TransactionalBackend_GetTransactionsForNonExistentValues(t *test // TestConsul_TransactionalBackend_GetTransactions tests that passing a slice of transactions to the // consul backend will populate values for any transactions that are Get operations. func TestConsul_TransactionalBackend_GetTransactions(t *testing.T) { - cleanup, config := consul.PrepareTestContainer(t, "1.14.2", false, true) + // TODO: unskip this after Consul releases 1.14 and we update our API dep. 
It currently fails but should pass with Consul 1.14 + t.SkipNow() + + cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) defer cleanup() client, err := api.NewClient(config.APIConfig()) diff --git a/physical/consul/helpers.go b/physical/consul/helpers.go index ce7c47fdcc9d9..71c30b310068e 100644 --- a/physical/consul/helpers.go +++ b/physical/consul/helpers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go index 9c54b0494ae79..86fc139ed92d0 100644 --- a/physical/couchdb/couchdb.go +++ b/physical/couchdb/couchdb.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package couchdb import ( diff --git a/physical/couchdb/couchdb_test.go b/physical/couchdb/couchdb_test.go index d7ae4be27766e..4205f8e64b1ee 100644 --- a/physical/couchdb/couchdb_test.go +++ b/physical/couchdb/couchdb_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package couchdb import ( @@ -10,13 +7,12 @@ import ( "net/http" "net/url" "os" - "runtime" "strings" "testing" "time" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" ) @@ -79,13 +75,6 @@ func (c couchDB) URL() *url.URL { var _ docker.ServiceConfig = &couchDB{} func prepareCouchdbDBTestContainer(t *testing.T) (func(), *couchDB) { - // ARM64 is only supported on CouchDB 2 and above. If we update - // our image and support to 2 and above, we can unskip these: - // https://hub.docker.com/r/arm64v8/couchdb/ - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as CouchDB 1.6 is not supported on ARM architectures") - } - // If environment variable is set, assume caller wants to target a real // DynamoDB. 
if os.Getenv("COUCHDB_ENDPOINT") != "" { @@ -97,8 +86,7 @@ func prepareCouchdbDBTestContainer(t *testing.T) (func(), *couchDB) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ContainerName: "couchdb", - ImageRepo: "docker.mirror.hashicorp.services/library/couchdb", + ImageRepo: "couchdb", ImageTag: "1.6", Ports: []string{"5984/tcp"}, DoNotAutoRemove: true, diff --git a/physical/dynamodb/dynamodb.go b/physical/dynamodb/dynamodb.go index 591c65cf710a6..18c2bbf50a4d1 100644 --- a/physical/dynamodb/dynamodb.go +++ b/physical/dynamodb/dynamodb.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dynamodb import ( diff --git a/physical/dynamodb/dynamodb_test.go b/physical/dynamodb/dynamodb_test.go index c4d25b9d7cb61..ae6722e245a46 100644 --- a/physical/dynamodb/dynamodb_test.go +++ b/physical/dynamodb/dynamodb_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dynamodb import ( @@ -10,14 +7,12 @@ import ( "net/http" "net/url" "os" - "runtime" - "strings" "testing" "time" "github.com/go-test/deep" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/helper/testhelpers/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" @@ -375,11 +370,6 @@ type Config struct { var _ docker.ServiceConfig = &Config{} func prepareDynamoDBTestContainer(t *testing.T) (func(), *Config) { - // Skipping on ARM, as this image can't run on ARM architecture - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as this image is not supported on ARM architectures") - } - // If environment variable is set, assume caller wants to target a real // DynamoDB. 
if endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT"); endpoint != "" { @@ -391,7 +381,7 @@ func prepareDynamoDBTestContainer(t *testing.T) (func(), *Config) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "docker.mirror.hashicorp.services/cnadiminti/dynamodb-local", + ImageRepo: "cnadiminti/dynamodb-local", ImageTag: "latest", ContainerName: "dynamodb", Ports: []string{"8000/tcp"}, diff --git a/physical/etcd/etcd.go b/physical/etcd/etcd.go index f17a552b54c9a..5bb8d4a31c1b8 100644 --- a/physical/etcd/etcd.go +++ b/physical/etcd/etcd.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package etcd import ( diff --git a/physical/etcd/etcd3.go b/physical/etcd/etcd3.go index 57a838a697431..486d448febf7a 100644 --- a/physical/etcd/etcd3.go +++ b/physical/etcd/etcd3.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package etcd import ( diff --git a/physical/etcd/etcd3_test.go b/physical/etcd/etcd3_test.go index a2de6314dd6f3..71150a698c637 100644 --- a/physical/etcd/etcd3_test.go +++ b/physical/etcd/etcd3_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package etcd import ( diff --git a/physical/foundationdb/fdb-go-install.sh b/physical/foundationdb/fdb-go-install.sh index 4b2c125223144..550d5cf4d14e7 100755 --- a/physical/foundationdb/fdb-go-install.sh +++ b/physical/foundationdb/fdb-go-install.sh @@ -1,7 +1,4 @@ #!/bin/bash -eu -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # # fdb-go-install.sh # diff --git a/physical/foundationdb/foundationdb.go b/physical/foundationdb/foundationdb.go index 03f984f560392..56305b2fbf7df 100644 --- a/physical/foundationdb/foundationdb.go +++ b/physical/foundationdb/foundationdb.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - //go:build foundationdb package foundationdb diff --git a/physical/foundationdb/foundationdb_test.go b/physical/foundationdb/foundationdb_test.go index ecd6aa8234e7e..c6fe75d5ebfd2 100644 --- a/physical/foundationdb/foundationdb_test.go +++ b/physical/foundationdb/foundationdb_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build foundationdb package foundationdb diff --git a/physical/foundationdb/foundationdbstub.go b/physical/foundationdb/foundationdbstub.go index 283ca0969f0f5..4fc2734e50b1f 100644 --- a/physical/foundationdb/foundationdbstub.go +++ b/physical/foundationdb/foundationdbstub.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !foundationdb package foundationdb diff --git a/physical/gcs/gcs.go b/physical/gcs/gcs.go index 4a3f5bdf4961a..f38ffa53d3fa7 100644 --- a/physical/gcs/gcs.go +++ b/physical/gcs/gcs.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package gcs import ( @@ -15,12 +12,13 @@ import ( "strings" "time" - "cloud.google.com/go/storage" metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/sdk/helper/useragent" "github.com/hashicorp/vault/sdk/physical" + + "cloud.google.com/go/storage" "google.golang.org/api/iterator" "google.golang.org/api/option" ) diff --git a/physical/gcs/gcs_ha.go b/physical/gcs/gcs_ha.go index 2e4e762a7469a..3a8e45d981905 100644 --- a/physical/gcs/gcs_ha.go +++ b/physical/gcs/gcs_ha.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package gcs import ( diff --git a/physical/gcs/gcs_ha_test.go b/physical/gcs/gcs_ha_test.go index cdd59e731da9f..8e1b91e777939 100644 --- a/physical/gcs/gcs_ha_test.go +++ b/physical/gcs/gcs_ha_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package gcs import ( diff --git a/physical/gcs/gcs_test.go b/physical/gcs/gcs_test.go index 332ba35d79ba5..4caab730faa72 100644 --- a/physical/gcs/gcs_test.go +++ b/physical/gcs/gcs_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package gcs import ( diff --git a/physical/manta/manta.go b/physical/manta/manta.go index cfb0770144abb..390683d3695fc 100644 --- a/physical/manta/manta.go +++ b/physical/manta/manta.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package manta import ( diff --git a/physical/manta/manta_test.go b/physical/manta/manta_test.go index 67d50fe71b14c..8db52c53ab0e0 100644 --- a/physical/manta/manta_test.go +++ b/physical/manta/manta_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package manta import ( diff --git a/physical/mssql/mssql.go b/physical/mssql/mssql.go index 2859a65ef3243..acce4b2b9fd1e 100644 --- a/physical/mssql/mssql.go +++ b/physical/mssql/mssql.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package mssql import ( @@ -21,8 +18,10 @@ import ( ) // Verify MSSQLBackend satisfies the correct interfaces -var _ physical.Backend = (*MSSQLBackend)(nil) -var identifierRegex = regexp.MustCompile(`^[\p{L}_][\p{L}\p{Nd}@#$_]*$`) +var ( + _ physical.Backend = (*MSSQLBackend)(nil) + identifierRegex = regexp.MustCompile(`^[\p{L}_][\p{L}\p{Nd}@#$_]*$`) +) type MSSQLBackend struct { dbTable string diff --git a/physical/mssql/mssql_test.go b/physical/mssql/mssql_test.go index 2324ff5c03f28..e026ff6fa2fd5 100644 --- a/physical/mssql/mssql_test.go +++ b/physical/mssql/mssql_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mssql import ( diff --git a/physical/mysql/mysql.go b/physical/mysql/mysql.go index 225882f7575b1..29bb3928ab814 100644 --- a/physical/mysql/mysql.go +++ b/physical/mysql/mysql.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mysql import ( diff --git a/physical/mysql/mysql_test.go b/physical/mysql/mysql_test.go index b13c7e4a57c4e..86373e91629e2 100644 --- a/physical/mysql/mysql_test.go +++ b/physical/mysql/mysql_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mysql import ( diff --git a/physical/oci/oci_ha_test.go b/physical/oci/oci_ha_test.go index b7213b9ceb28e..6dfaba9768c38 100644 --- a/physical/oci/oci_ha_test.go +++ b/physical/oci/oci_ha_test.go @@ -16,11 +16,6 @@ func TestOCIHABackend(t *testing.T) { if os.Getenv("VAULT_ACC") == "" { t.SkipNow() } - - if !hasOCICredentials() { - t.Skip("Skipping because OCI credentials could not be resolved. 
See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.") - } - bucketName, _ := uuid.GenerateUUID() configProvider := common.DefaultConfigProvider() objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider) diff --git a/physical/oci/oci_test.go b/physical/oci/oci_test.go index e20b808fdc60e..46edcb8c7247e 100644 --- a/physical/oci/oci_test.go +++ b/physical/oci/oci_test.go @@ -19,11 +19,6 @@ func TestOCIBackend(t *testing.T) { if os.Getenv("VAULT_ACC") == "" { t.SkipNow() } - - if !hasOCICredentials() { - t.Skip("Skipping because OCI credentials could not be resolved. See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.") - } - bucketName, _ := uuid.GenerateUUID() configProvider := common.DefaultConfigProvider() objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider) @@ -92,14 +87,3 @@ func getNamespaceName(objectStorageClient objectstorage.ObjectStorageClient, t * nameSpaceName := *response.Value return nameSpaceName } - -func hasOCICredentials() bool { - configProvider := common.DefaultConfigProvider() - - _, err := configProvider.KeyID() - if err != nil { - return false - } - - return true -} diff --git a/physical/postgresql/postgresql.go b/physical/postgresql/postgresql.go index a70133066750f..ed4c883440cab 100644 --- a/physical/postgresql/postgresql.go +++ b/physical/postgresql/postgresql.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package postgresql import ( diff --git a/physical/postgresql/postgresql_test.go b/physical/postgresql/postgresql_test.go index 5dec40aba5d9d..15d1ab35076d9 100644 --- a/physical/postgresql/postgresql_test.go +++ b/physical/postgresql/postgresql_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package postgresql import ( diff --git a/physical/raft/bolt_32bit_test.go b/physical/raft/bolt_32bit_test.go index 7694d82f5cabe..ccb1641ea299b 100644 --- a/physical/raft/bolt_32bit_test.go +++ b/physical/raft/bolt_32bit_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build 386 || arm package raft diff --git a/physical/raft/bolt_64bit_test.go b/physical/raft/bolt_64bit_test.go index c4b89b8cdc145..d88c01eed5947 100644 --- a/physical/raft/bolt_64bit_test.go +++ b/physical/raft/bolt_64bit_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !386 && !arm package raft diff --git a/physical/raft/bolt_linux.go b/physical/raft/bolt_linux.go index b7774c61eaa89..4ea13e2a39860 100644 --- a/physical/raft/bolt_linux.go +++ b/physical/raft/bolt_linux.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package raft import ( diff --git a/physical/raft/chunking_test.go b/physical/raft/chunking_test.go index 64f83e6b8daa0..bdd950b566d27 100644 --- a/physical/raft/chunking_test.go +++ b/physical/raft/chunking_test.go @@ -1,19 +1,16 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package raft import ( "bytes" "context" - "fmt" + fmt "fmt" "os" "testing" - "github.com/golang/protobuf/proto" + proto "github.com/golang/protobuf/proto" "github.com/hashicorp/go-raftchunking" raftchunkingtypes "github.com/hashicorp/go-raftchunking/types" - "github.com/hashicorp/go-uuid" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/raft" "github.com/hashicorp/raft-boltdb/v2" "github.com/hashicorp/vault/sdk/physical" @@ -29,7 +26,7 @@ func TestRaft_Chunking_Lifecycle(t *testing.T) { require := require.New(t) assert := assert.New(t) - b, dir := GetRaft(t, true, false) + b, dir := getRaft(t, true, false) defer os.RemoveAll(dir) t.Log("applying configuration") @@ -114,7 +111,7 @@ func TestFSM_Chunking_TermChange(t *testing.T) { require := require.New(t) assert := assert.New(t) - b, dir := GetRaft(t, true, false) + b, dir := getRaft(t, true, false) defer os.RemoveAll(dir) t.Log("applying configuration") @@ -188,7 +185,7 @@ func TestFSM_Chunking_TermChange(t *testing.T) { func TestRaft_Chunking_AppliedIndex(t *testing.T) { t.Parallel() - raft, dir := GetRaft(t, true, false) + raft, dir := getRaft(t, true, false) defer os.RemoveAll(dir) // Lower the size for tests diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go index a8882812665ec..c2d9953ce3f43 100644 --- a/physical/raft/fsm.go +++ b/physical/raft/fsm.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package raft import ( diff --git a/physical/raft/fsm_test.go b/physical/raft/fsm_test.go index ba0e382f09775..e80a6ce5573ff 100644 --- a/physical/raft/fsm_test.go +++ b/physical/raft/fsm_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package raft import ( diff --git a/physical/raft/msgpack.go b/physical/raft/msgpack.go index 88ac74d594984..299dd8e0a98df 100644 --- a/physical/raft/msgpack.go +++ b/physical/raft/msgpack.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package raft // If we downgrade msgpack from v1.1.5 to v0.5.5, everything will still diff --git a/physical/raft/raft.go b/physical/raft/raft.go index 1060eda7aa6e1..1b052595d80ec 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package raft import ( @@ -15,7 +12,6 @@ import ( "path/filepath" "strconv" "sync" - "sync/atomic" "time" "github.com/armon/go-metrics" @@ -35,9 +31,9 @@ import ( "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/version" "github.com/hashicorp/vault/vault/cluster" "github.com/hashicorp/vault/vault/seal" - "github.com/hashicorp/vault/version" bolt "go.etcd.io/bbolt" ) @@ -68,12 +64,12 @@ var ( // This is used to reduce disk I/O for the recently committed entries. raftLogCacheSize = 512 - raftState = "raft/" - peersFileName = "peers.json" + raftState = "raft/" + peersFileName = "peers.json" + restoreOpDelayDuration = 5 * time.Second - defaultMaxEntrySize = uint64(2 * raftchunking.ChunkSize) - GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in raft backend") + defaultMaxEntrySize = uint64(2 * raftchunking.ChunkSize) ) // RaftBackend implements the backend interfaces and uses the raft protocol to @@ -185,7 +181,6 @@ type RaftBackend struct { nonVoter bool effectiveSDKVersion string - failGetInTxn *uint32 } // LeaderJoinInfo contains information required by a node to join itself as a @@ -520,7 +515,6 @@ func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend redundancyZone: conf["autopilot_redundancy_zone"], nonVoter: nonVoter, upgradeVersion: upgradeVersion, - failGetInTxn: new(uint32), }, nil } @@ -572,14 +566,6 @@ func (b *RaftBackend) Close() error { return nil } -func (b *RaftBackend) FailGetInTxn(fail bool) { - var val uint32 - 
if fail { - val = 1 - } - atomic.StoreUint32(b.failGetInTxn, val) -} - func (b *RaftBackend) SetEffectiveSDKVersion(sdkVersion string) { b.l.Lock() b.effectiveSDKVersion = sdkVersion @@ -1367,7 +1353,7 @@ func (b *RaftBackend) Peers(ctx context.Context) ([]Peer, error) { // SnapshotHTTP is a wrapper for Snapshot that sends the snapshot as an HTTP // response. -func (b *RaftBackend) SnapshotHTTP(out *logical.HTTPResponseWriter, access seal.Access) error { +func (b *RaftBackend) SnapshotHTTP(out *logical.HTTPResponseWriter, access *seal.Access) error { out.Header().Add("Content-Disposition", "attachment") out.Header().Add("Content-Type", "application/gzip") @@ -1377,7 +1363,7 @@ func (b *RaftBackend) SnapshotHTTP(out *logical.HTTPResponseWriter, access seal. // Snapshot takes a raft snapshot, packages it into a archive file and writes it // to the provided writer. Seal access is used to encrypt the SHASUM file so we // can validate the snapshot was taken using the same root keys or not. -func (b *RaftBackend) Snapshot(out io.Writer, access seal.Access) error { +func (b *RaftBackend) Snapshot(out io.Writer, access *seal.Access) error { b.l.RLock() defer b.l.RUnlock() @@ -1401,7 +1387,7 @@ func (b *RaftBackend) Snapshot(out io.Writer, access seal.Access) error { // access is used to decrypt the SHASUM file in the archive to ensure this // snapshot has the same root key as the running instance. If the provided // access is nil then it will skip that validation. 
-func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access seal.Access) (*os.File, func(), raft.SnapshotMeta, error) { +func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access *seal.Access) (*os.File, func(), raft.SnapshotMeta, error) { b.l.RLock() defer b.l.RUnlock() @@ -1577,13 +1563,6 @@ func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry return err } - failGetInTxn := atomic.LoadUint32(b.failGetInTxn) - for _, t := range txns { - if t.Operation == physical.GetOperation && failGetInTxn != 0 { - return GetInTxnDisabledError - } - } - txnMap := make(map[string]*physical.TxnEntry) command := &LogData{ @@ -1894,7 +1873,7 @@ func (l *RaftLock) Value() (bool, string, error) { // sealer implements the snapshot.Sealer interface and is used in the snapshot // process for encrypting/decrypting the SHASUM file in snapshot archives. type sealer struct { - access seal.Access + access *seal.Access } // Seal encrypts the data with using the seal access object. diff --git a/physical/raft/raft_autopilot.go b/physical/raft/raft_autopilot.go index ca0ee759e93ac..a67683f2b0dbf 100644 --- a/physical/raft/raft_autopilot.go +++ b/physical/raft/raft_autopilot.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package raft import ( @@ -296,7 +293,7 @@ type Delegate struct { emptyVersionLogs map[raft.ServerID]struct{} } -func NewDelegate(b *RaftBackend) *Delegate { +func newDelegate(b *RaftBackend) *Delegate { return &Delegate{ RaftBackend: b, inflightRemovals: make(map[raft.ServerID]bool), @@ -389,7 +386,6 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { return nil } - apServerStates := d.autopilot.GetState().Servers servers := future.Configuration().Servers serverIDs := make([]string, 0, len(servers)) for _, server := range servers { @@ -433,19 +429,6 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { Ext: d.autopilotServerExt(state), } - // As KnownServers is a delegate called by autopilot let's check if we already - // had this data in the correct format and use it. If we don't (which sounds a - // bit sad, unless this ISN'T a voter) then as a fail-safe, let's try what we've - // done elsewhere in code to check the desired suffrage and manually set NodeType - // based on whether that's a voter or not. If we don't do either of these - // things, NodeType isn't set which means technically it's not a voter. - // It shouldn't be a voter and end up in this state. 
- if apServerState, found := apServerStates[raft.ServerID(id)]; found && apServerState.Server.NodeType != "" { - server.NodeType = apServerState.Server.NodeType - } else if state.DesiredSuffrage == "voter" { - server.NodeType = autopilot.NodeVoter - } - switch state.IsDead.Load() { case true: d.logger.Debug("informing autopilot that the node left", "id", id) @@ -463,7 +446,6 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { Name: d.localID, RaftVersion: raft.ProtocolVersionMax, NodeStatus: autopilot.NodeAlive, - NodeType: autopilot.NodeVoter, // The leader must be a voter Meta: d.meta(&FollowerState{ UpgradeVersion: d.EffectiveVersion(), RedundancyZone: d.RedundancyZone(), @@ -573,13 +555,6 @@ func (b *RaftBackend) startFollowerHeartbeatTracker() { } for range tickerCh { b.l.RLock() - if b.raft == nil { - // We could be racing with teardown, which will stop the ticker - // but that doesn't guarantee that we won't reach this line with a nil - // b.raft. - b.l.RUnlock() - return - } b.followerStates.l.RLock() myAppliedIndex := b.raft.AppliedIndex() for peerID, state := range b.followerStates.followers { @@ -839,7 +814,7 @@ func (b *RaftBackend) SetupAutopilot(ctx context.Context, storageConfig *Autopil if b.autopilotUpdateInterval != 0 { options = append(options, autopilot.WithUpdateInterval(b.autopilotUpdateInterval)) } - b.autopilot = autopilot.New(b.raft, NewDelegate(b), options...) + b.autopilot = autopilot.New(b.raft, newDelegate(b), options...) b.followerStates = followerStates b.followerHeartbeatTicker = time.NewTicker(1 * time.Second) diff --git a/physical/raft/raft_test.go b/physical/raft/raft_test.go index 73d0ce32c5434..50171fd68c3df 100644 --- a/physical/raft/raft_test.go +++ b/physical/raft/raft_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package raft import ( @@ -30,6 +27,73 @@ import ( bolt "go.etcd.io/bbolt" ) +func getRaft(t testing.TB, bootstrap bool, noStoreState bool) (*RaftBackend, string) { + raftDir, err := ioutil.TempDir("", "vault-raft-") + if err != nil { + t.Fatal(err) + } + t.Logf("raft dir: %s", raftDir) + + return getRaftWithDir(t, bootstrap, noStoreState, raftDir) +} + +func getRaftWithDir(t testing.TB, bootstrap bool, noStoreState bool, raftDir string) (*RaftBackend, string) { + id, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + + logger := hclog.New(&hclog.LoggerOptions{ + Name: fmt.Sprintf("raft-%s", id), + Level: hclog.Trace, + }) + logger.Info("raft dir", "dir", raftDir) + + conf := map[string]string{ + "path": raftDir, + "trailing_logs": "100", + "node_id": id, + } + + if noStoreState { + conf["doNotStoreLatestState"] = "" + } + + backendRaw, err := NewRaftBackend(conf, logger) + if err != nil { + t.Fatal(err) + } + backend := backendRaw.(*RaftBackend) + + if bootstrap { + err = backend.Bootstrap([]Peer{ + { + ID: backend.NodeID(), + Address: backend.NodeID(), + }, + }) + if err != nil { + t.Fatal(err) + } + + err = backend.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } + + for { + if backend.raft.AppliedIndex() >= 2 { + break + } + } + + } + + backend.DisableAutopilot() + + return backend, raftDir +} + func connectPeers(nodes ...*RaftBackend) { for _, node := range nodes { for _, peer := range nodes { @@ -156,7 +220,7 @@ func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB, dataOnly bool) error { } func TestRaft_Backend(t *testing.T) { - b, dir := GetRaft(t, true, true) + b, dir := getRaft(t, true, true) defer os.RemoveAll(dir) physical.ExerciseBackend(t, b) @@ -252,7 +316,7 @@ func TestRaft_ParseNonVoter(t *testing.T) { } func TestRaft_Backend_LargeKey(t *testing.T) { - b, dir := GetRaft(t, true, true) + b, dir := getRaft(t, true, true) defer os.RemoveAll(dir) key, err := 
base62.Random(bolt.MaxKeySize + 1) @@ -280,7 +344,7 @@ func TestRaft_Backend_LargeKey(t *testing.T) { } func TestRaft_Backend_LargeValue(t *testing.T) { - b, dir := GetRaft(t, true, true) + b, dir := getRaft(t, true, true) defer os.RemoveAll(dir) value := make([]byte, defaultMaxEntrySize+1) @@ -308,7 +372,7 @@ func TestRaft_Backend_LargeValue(t *testing.T) { // TestRaft_TransactionalBackend_GetTransactions tests that passing a slice of transactions to the // raft backend will populate values for any transactions that are Get operations. func TestRaft_TransactionalBackend_GetTransactions(t *testing.T) { - b, dir := GetRaft(t, true, true) + b, dir := getRaft(t, true, true) defer os.RemoveAll(dir) ctx := context.Background() @@ -365,7 +429,7 @@ func TestRaft_TransactionalBackend_GetTransactions(t *testing.T) { } func TestRaft_TransactionalBackend_LargeKey(t *testing.T) { - b, dir := GetRaft(t, true, true) + b, dir := getRaft(t, true, true) defer os.RemoveAll(dir) value := make([]byte, defaultMaxEntrySize+1) @@ -404,7 +468,7 @@ func TestRaft_TransactionalBackend_LargeKey(t *testing.T) { } func TestRaft_TransactionalBackend_LargeValue(t *testing.T) { - b, dir := GetRaft(t, true, true) + b, dir := getRaft(t, true, true) defer os.RemoveAll(dir) value := make([]byte, defaultMaxEntrySize+1) @@ -439,14 +503,14 @@ func TestRaft_TransactionalBackend_LargeValue(t *testing.T) { } func TestRaft_Backend_ListPrefix(t *testing.T) { - b, dir := GetRaft(t, true, true) + b, dir := getRaft(t, true, true) defer os.RemoveAll(dir) physical.ExerciseBackend_ListPrefix(t, b) } func TestRaft_TransactionalBackend(t *testing.T) { - b, dir := GetRaft(t, true, true) + b, dir := getRaft(t, true, true) defer os.RemoveAll(dir) physical.ExerciseTransactionalBackend(t, b) @@ -454,9 +518,9 @@ func TestRaft_TransactionalBackend(t *testing.T) { func TestRaft_HABackend(t *testing.T) { t.Skip() - raft, dir := GetRaft(t, true, true) + raft, dir := getRaft(t, true, true) defer os.RemoveAll(dir) - raft2, dir2 
:= GetRaft(t, false, true) + raft2, dir2 := getRaft(t, false, true) defer os.RemoveAll(dir2) // Add raft2 to the cluster @@ -466,9 +530,9 @@ func TestRaft_HABackend(t *testing.T) { } func TestRaft_Backend_ThreeNode(t *testing.T) { - raft1, dir := GetRaft(t, true, true) - raft2, dir2 := GetRaft(t, false, true) - raft3, dir3 := GetRaft(t, false, true) + raft1, dir := getRaft(t, true, true) + raft2, dir2 := getRaft(t, false, true) + raft3, dir3 := getRaft(t, false, true) defer os.RemoveAll(dir) defer os.RemoveAll(dir2) defer os.RemoveAll(dir3) @@ -489,9 +553,9 @@ func TestRaft_Backend_ThreeNode(t *testing.T) { func TestRaft_GetOfflineConfig(t *testing.T) { // Create 3 raft nodes - raft1, dir1 := GetRaft(t, true, true) - raft2, dir2 := GetRaft(t, false, true) - raft3, dir3 := GetRaft(t, false, true) + raft1, dir1 := getRaft(t, true, true) + raft2, dir2 := getRaft(t, false, true) + raft3, dir3 := getRaft(t, false, true) defer os.RemoveAll(dir1) defer os.RemoveAll(dir2) defer os.RemoveAll(dir3) @@ -527,10 +591,10 @@ func TestRaft_GetOfflineConfig(t *testing.T) { func TestRaft_Recovery(t *testing.T) { // Create 4 raft nodes - raft1, dir1 := GetRaft(t, true, true) - raft2, dir2 := GetRaft(t, false, true) - raft3, dir3 := GetRaft(t, false, true) - raft4, dir4 := GetRaft(t, false, true) + raft1, dir1 := getRaft(t, true, true) + raft2, dir2 := getRaft(t, false, true) + raft3, dir3 := getRaft(t, false, true) + raft4, dir4 := getRaft(t, false, true) defer os.RemoveAll(dir1) defer os.RemoveAll(dir2) defer os.RemoveAll(dir3) @@ -614,9 +678,9 @@ func TestRaft_Recovery(t *testing.T) { } func TestRaft_TransactionalBackend_ThreeNode(t *testing.T) { - raft1, dir := GetRaft(t, true, true) - raft2, dir2 := GetRaft(t, false, true) - raft3, dir3 := GetRaft(t, false, true) + raft1, dir := getRaft(t, true, true) + raft2, dir2 := getRaft(t, false, true) + raft3, dir3 := getRaft(t, false, true) defer os.RemoveAll(dir) defer os.RemoveAll(dir2) defer os.RemoveAll(dir3) @@ -636,7 +700,7 @@ func 
TestRaft_TransactionalBackend_ThreeNode(t *testing.T) { } func TestRaft_Backend_Performance(t *testing.T) { - b, dir := GetRaft(t, true, false) + b, dir := getRaft(t, true, false) defer os.RemoveAll(dir) defaultConfig := raft.DefaultConfig() @@ -692,9 +756,9 @@ func TestRaft_Backend_Performance(t *testing.T) { } func BenchmarkDB_Puts(b *testing.B) { - raft, dir := GetRaft(b, true, false) + raft, dir := getRaft(b, true, false) defer os.RemoveAll(dir) - raft2, dir2 := GetRaft(b, true, false) + raft2, dir2 := getRaft(b, true, false) defer os.RemoveAll(dir2) bench := func(b *testing.B, s physical.Backend, dataSize int) { @@ -724,7 +788,7 @@ func BenchmarkDB_Puts(b *testing.B) { } func BenchmarkDB_Snapshot(b *testing.B) { - raft, dir := GetRaft(b, true, false) + raft, dir := getRaft(b, true, false) defer os.RemoveAll(dir) data, err := uuid.GenerateRandomBytes(256 * 1024) diff --git a/physical/raft/raft_util.go b/physical/raft/raft_util.go index bd496dfac64bd..34570fba678f5 100644 --- a/physical/raft/raft_util.go +++ b/physical/raft/raft_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !enterprise package raft diff --git a/physical/raft/snapshot.go b/physical/raft/snapshot.go index 68d9c953f8199..cebcdb0a4a82f 100644 --- a/physical/raft/snapshot.go +++ b/physical/raft/snapshot.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package raft import ( diff --git a/physical/raft/snapshot_test.go b/physical/raft/snapshot_test.go index 3472c8d53981e..53757c5683d77 100644 --- a/physical/raft/snapshot_test.go +++ b/physical/raft/snapshot_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package raft import ( @@ -18,7 +15,7 @@ import ( "testing" "time" - "github.com/hashicorp/go-hclog" + hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/raft" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" @@ -58,7 +55,7 @@ func addPeer(t *testing.T, leader, follower *RaftBackend) { } func TestRaft_Snapshot_Loading(t *testing.T) { - raft, dir := GetRaft(t, true, false) + raft, dir := getRaft(t, true, false) defer os.RemoveAll(dir) // Write some data @@ -142,7 +139,7 @@ func TestRaft_Snapshot_Loading(t *testing.T) { } func TestRaft_Snapshot_Index(t *testing.T) { - raft, dir := GetRaft(t, true, false) + raft, dir := getRaft(t, true, false) defer os.RemoveAll(dir) err := raft.Put(context.Background(), &physical.Entry{ @@ -229,9 +226,9 @@ func TestRaft_Snapshot_Index(t *testing.T) { } func TestRaft_Snapshot_Peers(t *testing.T) { - raft1, dir := GetRaft(t, true, false) - raft2, dir2 := GetRaft(t, false, false) - raft3, dir3 := GetRaft(t, false, false) + raft1, dir := getRaft(t, true, false) + raft2, dir2 := getRaft(t, false, false) + raft3, dir3 := getRaft(t, false, false) defer os.RemoveAll(dir) defer os.RemoveAll(dir2) defer os.RemoveAll(dir3) @@ -312,9 +309,9 @@ func ensureCommitApplied(t *testing.T, leaderCommitIdx uint64, backend *RaftBack } func TestRaft_Snapshot_Restart(t *testing.T) { - raft1, dir := GetRaft(t, true, false) + raft1, dir := getRaft(t, true, false) defer os.RemoveAll(dir) - raft2, dir2 := GetRaft(t, false, false) + raft2, dir2 := getRaft(t, false, false) defer os.RemoveAll(dir2) // Write some data @@ -376,9 +373,9 @@ func TestRaft_Snapshot_Restart(t *testing.T) { /* func TestRaft_Snapshot_ErrorRecovery(t *testing.T) { - raft1, dir := GetRaft(t, true, false) - raft2, dir2 := GetRaft(t, false, false) - raft3, dir3 := GetRaft(t, false, false) + raft1, dir := getRaft(t, true, false) + raft2, dir2 := getRaft(t, false, false) + raft3, dir3 := getRaft(t, false, false) 
defer os.RemoveAll(dir) defer os.RemoveAll(dir2) defer os.RemoveAll(dir3) @@ -458,9 +455,9 @@ func TestRaft_Snapshot_ErrorRecovery(t *testing.T) { }*/ func TestRaft_Snapshot_Take_Restore(t *testing.T) { - raft1, dir := GetRaft(t, true, false) + raft1, dir := getRaft(t, true, false) defer os.RemoveAll(dir) - raft2, dir2 := GetRaft(t, false, false) + raft2, dir2 := getRaft(t, false, false) defer os.RemoveAll(dir2) addPeer(t, raft1, raft2) diff --git a/physical/raft/streamlayer.go b/physical/raft/streamlayer.go index 90d8e495cbaf2..ed154f8bcdaf4 100644 --- a/physical/raft/streamlayer.go +++ b/physical/raft/streamlayer.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package raft import ( diff --git a/physical/raft/streamlayer_test.go b/physical/raft/streamlayer_test.go index d826eaadca759..51a26f832266a 100644 --- a/physical/raft/streamlayer_test.go +++ b/physical/raft/streamlayer_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package raft import ( diff --git a/physical/raft/testing.go b/physical/raft/testing.go deleted file mode 100644 index ea6847911f2bb..0000000000000 --- a/physical/raft/testing.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package raft - -import ( - "context" - "fmt" - "io/ioutil" - "testing" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" -) - -func GetRaft(t testing.TB, bootstrap bool, noStoreState bool) (*RaftBackend, string) { - raftDir, err := ioutil.TempDir("", "vault-raft-") - if err != nil { - t.Fatal(err) - } - t.Logf("raft dir: %s", raftDir) - - return getRaftWithDir(t, bootstrap, noStoreState, raftDir) -} - -func getRaftWithDir(t testing.TB, bootstrap bool, noStoreState bool, raftDir string) (*RaftBackend, string) { - id, err := uuid.GenerateUUID() - if err != nil { - t.Fatal(err) - } - - logger := hclog.New(&hclog.LoggerOptions{ - Name: fmt.Sprintf("raft-%s", id), - Level: hclog.Trace, - }) - logger.Info("raft dir", "dir", raftDir) - - conf := map[string]string{ - "path": raftDir, - "trailing_logs": "100", - "node_id": id, - } - - if noStoreState { - conf["doNotStoreLatestState"] = "" - } - - backendRaw, err := NewRaftBackend(conf, logger) - if err != nil { - t.Fatal(err) - } - backend := backendRaw.(*RaftBackend) - - if bootstrap { - err = backend.Bootstrap([]Peer{ - { - ID: backend.NodeID(), - Address: backend.NodeID(), - }, - }) - if err != nil { - t.Fatal(err) - } - - err = backend.SetupCluster(context.Background(), SetupOpts{}) - if err != nil { - t.Fatal(err) - } - - for { - if backend.raft.AppliedIndex() >= 2 { - break - } - } - - } - - backend.DisableAutopilot() - - return backend, raftDir -} diff --git a/physical/raft/types.pb.go b/physical/raft/types.pb.go index 2835e1f17d968..6b9e4c4b3659d 100644 --- a/physical/raft/types.pb.go +++ b/physical/raft/types.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: physical/raft/types.proto package raft diff --git a/physical/raft/types.proto b/physical/raft/types.proto index bb3d136e10ebf..0b1d189ef6e5d 100644 --- a/physical/raft/types.proto +++ b/physical/raft/types.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; option go_package = "github.com/hashicorp/vault/physical/raft"; diff --git a/physical/raft/vars_32bit.go b/physical/raft/vars_32bit.go index 6e5c51fe93529..c9662e796c563 100644 --- a/physical/raft/vars_32bit.go +++ b/physical/raft/vars_32bit.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build 386 || arm || windows package raft diff --git a/physical/raft/vars_64bit.go b/physical/raft/vars_64bit.go index a1eea0febc08d..40efb4c08910a 100644 --- a/physical/raft/vars_64bit.go +++ b/physical/raft/vars_64bit.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build !386 && !arm && !windows package raft diff --git a/physical/s3/s3.go b/physical/s3/s3.go index 0cb8e0af3552e..e9e1fd33789a9 100644 --- a/physical/s3/s3.go +++ b/physical/s3/s3.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package s3 import ( diff --git a/physical/s3/s3_test.go b/physical/s3/s3_test.go index 139e41dfad98f..794f557e0f39f 100644 --- a/physical/s3/s3_test.go +++ b/physical/s3/s3_test.go @@ -1,17 +1,12 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package s3 import ( - "context" "fmt" "math/rand" "os" "testing" "time" - "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" @@ -34,10 +29,6 @@ func DoS3BackendTest(t *testing.T, kmsKeyId string) { t.Skip() } - if !hasAWSCredentials() { - t.Skip("Skipping because AWS credentials could not be resolved. See https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials for information on how to set up AWS credentials.") - } - logger := logging.NewVaultLogger(log.Debug) credsConfig := &awsutil.CredentialsConfig{Logger: logger} @@ -118,20 +109,3 @@ func DoS3BackendTest(t *testing.T, kmsKeyId string) { physical.ExerciseBackend(t, b) physical.ExerciseBackend_ListPrefix(t, b) } - -func hasAWSCredentials() bool { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - cfg, err := config.LoadDefaultConfig(ctx) - if err != nil { - return false - } - - creds, err := cfg.Credentials.Retrieve(ctx) - if err != nil { - return false - } - - return creds.HasKeys() -} diff --git a/physical/spanner/spanner.go b/physical/spanner/spanner.go index b84e0d4637b4c..09634da53c8dd 100644 --- a/physical/spanner/spanner.go +++ b/physical/spanner/spanner.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package spanner import ( @@ -12,16 +9,17 @@ import ( "strings" "time" - "cloud.google.com/go/spanner" metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/sdk/helper/useragent" "github.com/hashicorp/vault/sdk/physical" - "github.com/pkg/errors" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/grpc/codes" + + "cloud.google.com/go/spanner" + "github.com/pkg/errors" ) // Verify Backend satisfies the correct interfaces diff --git a/physical/spanner/spanner_ha.go b/physical/spanner/spanner_ha.go index d116be0ba34e4..7aa4f8986dbd8 100644 --- a/physical/spanner/spanner_ha.go +++ b/physical/spanner/spanner_ha.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package spanner import ( diff --git a/physical/spanner/spanner_ha_test.go b/physical/spanner/spanner_ha_test.go index dad39ad4c95b4..49a818b393cc5 100644 --- a/physical/spanner/spanner_ha_test.go +++ b/physical/spanner/spanner_ha_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package spanner import ( diff --git a/physical/spanner/spanner_test.go b/physical/spanner/spanner_test.go index 4b7c1c46b1149..d484dd316cb58 100644 --- a/physical/spanner/spanner_test.go +++ b/physical/spanner/spanner_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package spanner import ( diff --git a/physical/swift/swift.go b/physical/swift/swift.go index d616bfe35b320..2155d44c8aba3 100644 --- a/physical/swift/swift.go +++ b/physical/swift/swift.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package swift import ( diff --git a/physical/swift/swift_test.go b/physical/swift/swift_test.go index 8f8af160fefd2..0e569c5a9fae9 100644 --- a/physical/swift/swift_test.go +++ b/physical/swift/swift_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package swift import ( diff --git a/physical/zookeeper/zookeeper.go b/physical/zookeeper/zookeeper.go index e52ac9b630338..26c09fb165c70 100644 --- a/physical/zookeeper/zookeeper.go +++ b/physical/zookeeper/zookeeper.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package zookeeper import ( @@ -16,12 +13,13 @@ import ( "sync" "time" - metrics "github.com/armon/go-metrics" - "github.com/go-zookeeper/zk" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/go-secure-stdlib/tlsutil" "github.com/hashicorp/vault/sdk/physical" + + metrics "github.com/armon/go-metrics" + "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/samuel/go-zookeeper/zk" ) const ( @@ -644,9 +642,8 @@ func (i *ZooKeeperHALock) Unlock() error { return } - timer := time.NewTimer(time.Second) select { - case <-timer.C: + case <-time.After(time.Second): attempts := attempts + 1 if attempts >= 10 { i.logger.Error("release lock max attempts reached. Lock may not be released", "error", err) @@ -654,7 +651,6 @@ func (i *ZooKeeperHALock) Unlock() error { } continue case <-i.stopCh: - timer.Stop() return } } diff --git a/physical/zookeeper/zookeeper_test.go b/physical/zookeeper/zookeeper_test.go index e4448bf73ab7c..baaa41fdbf53c 100644 --- a/physical/zookeeper/zookeeper_test.go +++ b/physical/zookeeper/zookeeper_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package zookeeper import ( @@ -9,10 +6,11 @@ import ( "testing" "time" - "github.com/go-zookeeper/zk" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" + + "github.com/samuel/go-zookeeper/zk" ) func TestZooKeeperBackend(t *testing.T) { diff --git a/plugins/database/cassandra/cassandra-database-plugin/main.go b/plugins/database/cassandra/cassandra-database-plugin/main.go index 8a91d1b50c7f3..4ee0903642e08 100644 --- a/plugins/database/cassandra/cassandra-database-plugin/main.go +++ b/plugins/database/cassandra/cassandra-database-plugin/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( diff --git a/plugins/database/cassandra/cassandra.go b/plugins/database/cassandra/cassandra.go index 8118fa06171bd..de549261fd60f 100644 --- a/plugins/database/cassandra/cassandra.go +++ b/plugins/database/cassandra/cassandra.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cassandra import ( diff --git a/plugins/database/cassandra/cassandra_test.go b/plugins/database/cassandra/cassandra_test.go index 7a3260935b7c4..ec8b42290d30b 100644 --- a/plugins/database/cassandra/cassandra_test.go +++ b/plugins/database/cassandra/cassandra_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cassandra import ( diff --git a/plugins/database/cassandra/connection_producer.go b/plugins/database/cassandra/connection_producer.go index a63ed27d5eb3f..72f7bb878e162 100644 --- a/plugins/database/cassandra/connection_producer.go +++ b/plugins/database/cassandra/connection_producer.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cassandra import ( diff --git a/plugins/database/cassandra/connection_producer_test.go b/plugins/database/cassandra/connection_producer_test.go index e2f4ba0fc59f6..3f99c1d65a009 100644 --- a/plugins/database/cassandra/connection_producer_test.go +++ b/plugins/database/cassandra/connection_producer_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cassandra import ( diff --git a/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml b/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml index 481996968866b..71fdead51f238 100644 --- a/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml +++ b/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # Cassandra storage config YAML # NOTE: diff --git a/plugins/database/cassandra/tls.go b/plugins/database/cassandra/tls.go index 17e148d7496d5..cc64d3c3b5f48 100644 --- a/plugins/database/cassandra/tls.go +++ b/plugins/database/cassandra/tls.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cassandra import ( diff --git a/plugins/database/hana/hana-database-plugin/main.go b/plugins/database/hana/hana-database-plugin/main.go index 9ec568b66db40..2057c36c08d40 100644 --- a/plugins/database/hana/hana-database-plugin/main.go +++ b/plugins/database/hana/hana-database-plugin/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( diff --git a/plugins/database/hana/hana.go b/plugins/database/hana/hana.go index 987cc1af2f812..bca437c369a69 100644 --- a/plugins/database/hana/hana.go +++ b/plugins/database/hana/hana.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package hana import ( diff --git a/plugins/database/hana/hana_test.go b/plugins/database/hana/hana_test.go index 6a3c1dbe07d4b..67c1088834897 100644 --- a/plugins/database/hana/hana_test.go +++ b/plugins/database/hana/hana_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package hana import ( diff --git a/plugins/database/influxdb/connection_producer.go b/plugins/database/influxdb/connection_producer.go index b9f18c5433863..a9a6964ea2101 100644 --- a/plugins/database/influxdb/connection_producer.go +++ b/plugins/database/influxdb/connection_producer.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package influxdb import ( diff --git a/plugins/database/influxdb/influxdb-database-plugin/main.go b/plugins/database/influxdb/influxdb-database-plugin/main.go index bfc94f75fd31f..c8f6c5fa1e3fd 100644 --- a/plugins/database/influxdb/influxdb-database-plugin/main.go +++ b/plugins/database/influxdb/influxdb-database-plugin/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( diff --git a/plugins/database/influxdb/influxdb.go b/plugins/database/influxdb/influxdb.go index f216319a6c13c..4a8225e52b3be 100644 --- a/plugins/database/influxdb/influxdb.go +++ b/plugins/database/influxdb/influxdb.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package influxdb import ( diff --git a/plugins/database/influxdb/influxdb_test.go b/plugins/database/influxdb/influxdb_test.go index 1fc858bfc5c85..4ecdac51bcbd4 100644 --- a/plugins/database/influxdb/influxdb_test.go +++ b/plugins/database/influxdb/influxdb_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package influxdb import ( @@ -9,15 +6,14 @@ import ( "net/url" "os" "reflect" - "runtime" "strconv" "strings" "testing" "time" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/helper/testhelpers/docker" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" - "github.com/hashicorp/vault/sdk/helper/docker" influx "github.com/influxdata/influxdb1-client/v2" "github.com/stretchr/testify/require" ) @@ -52,11 +48,6 @@ func (c *Config) connectionParams() map[string]interface{} { } func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) { - // Skipping on ARM, as this image can't run on ARM architecture - if strings.Contains(runtime.GOARCH, "arm") { - t.Skip("Skipping, as this image is not supported on ARM architectures") - } - c := &Config{ Username: "influx-root", Password: "influx-root", @@ -67,9 +58,8 @@ func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "docker.mirror.hashicorp.services/influxdb", - ContainerName: "influxdb", - ImageTag: "1.8-alpine", + ImageRepo: "influxdb", + ImageTag: "1.8-alpine", Env: []string{ "INFLUXDB_DB=vault", "INFLUXDB_ADMIN_USER=" + c.Username, diff --git a/plugins/database/mongodb/cert_helpers_test.go b/plugins/database/mongodb/cert_helpers_test.go index 9f9388b1cf2b4..deb04ab9c4e42 100644 --- a/plugins/database/mongodb/cert_helpers_test.go +++ b/plugins/database/mongodb/cert_helpers_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package mongodb import ( diff --git a/plugins/database/mongodb/connection_producer.go b/plugins/database/mongodb/connection_producer.go index 4686c3b13f119..348fb6bd4d435 100644 --- a/plugins/database/mongodb/connection_producer.go +++ b/plugins/database/mongodb/connection_producer.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mongodb import ( diff --git a/plugins/database/mongodb/connection_producer_test.go b/plugins/database/mongodb/connection_producer_test.go index 2ce3872c597f3..4b0ccaf2514a7 100644 --- a/plugins/database/mongodb/connection_producer_test.go +++ b/plugins/database/mongodb/connection_producer_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mongodb import ( @@ -17,11 +14,13 @@ import ( "time" "github.com/hashicorp/vault/helper/testhelpers/certhelpers" + "github.com/hashicorp/vault/helper/testhelpers/mongodb" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/ory/dockertest" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readpref" + "gopkg.in/mgo.v2" ) func TestInit_clientTLS(t *testing.T) { @@ -216,12 +215,19 @@ func startMongoWithTLS(t *testing.T, version string, confDir string) (retURL str // exponential backoff-retry err = pool.Retry(func() error { var err error - ctx, _ := context.WithTimeout(context.Background(), 1*time.Minute) - client, err := mongo.Connect(ctx, options.Client().ApplyURI(retURL)) - if err = client.Disconnect(ctx); err != nil { - t.Fatal() + dialInfo, err := mongodb.ParseMongoURL(retURL) + if err != nil { + return err + } + + session, err := mgo.DialWithInfo(dialInfo) + if err != nil { + return err } - return client.Ping(ctx, readpref.Primary()) + defer session.Close() + session.SetSyncTimeout(1 * time.Minute) + session.SetSocketTimeout(1 * time.Minute) + return session.Ping() }) if err 
!= nil { cleanup() diff --git a/plugins/database/mongodb/mongodb-database-plugin/main.go b/plugins/database/mongodb/mongodb-database-plugin/main.go index fe68659eca633..30dd5fdd7cff7 100644 --- a/plugins/database/mongodb/mongodb-database-plugin/main.go +++ b/plugins/database/mongodb/mongodb-database-plugin/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( diff --git a/plugins/database/mongodb/mongodb.go b/plugins/database/mongodb/mongodb.go index 4026fbc693da4..6cb511b89f8e4 100644 --- a/plugins/database/mongodb/mongodb.go +++ b/plugins/database/mongodb/mongodb.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mongodb import ( diff --git a/plugins/database/mongodb/mongodb_test.go b/plugins/database/mongodb/mongodb_test.go index 4f36e36179ec4..d647a264f184a 100644 --- a/plugins/database/mongodb/mongodb_test.go +++ b/plugins/database/mongodb/mongodb_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package mongodb import ( @@ -8,7 +5,6 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "net/http" "reflect" "strings" "sync" @@ -17,6 +13,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/vault/helper/testhelpers/certhelpers" "github.com/hashicorp/vault/helper/testhelpers/mongodb" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" @@ -30,7 +27,7 @@ import ( const mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }` func TestMongoDB_Initialize(t *testing.T) { - cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + cleanup, connURL := mongodb.PrepareTestContainer(t, "5.0.10") defer cleanup() db := new() @@ -123,7 +120,7 @@ func TestNewUser_usernameTemplate(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + cleanup, connURL := mongodb.PrepareTestContainer(t, "5.0.10") defer cleanup() db := new() @@ -149,7 +146,7 @@ func TestNewUser_usernameTemplate(t *testing.T) { } func TestMongoDB_CreateUser(t *testing.T) { - cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + cleanup, connURL := mongodb.PrepareTestContainer(t, "5.0.10") defer cleanup() db := new() @@ -181,7 +178,7 @@ func TestMongoDB_CreateUser(t *testing.T) { } func TestMongoDB_CreateUser_writeConcern(t *testing.T) { - cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + cleanup, connURL := mongodb.PrepareTestContainer(t, "5.0.10") defer cleanup() initReq := dbplugin.InitializeRequest{ @@ -215,7 +212,7 @@ func TestMongoDB_CreateUser_writeConcern(t *testing.T) { } func TestMongoDB_DeleteUser(t *testing.T) { - cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + cleanup, connURL := mongodb.PrepareTestContainer(t, "5.0.10") defer cleanup() db := new() @@ -255,7 +252,7 @@ func TestMongoDB_DeleteUser(t *testing.T) { } func 
TestMongoDB_UpdateUser_Password(t *testing.T) { - cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + cleanup, connURL := mongodb.PrepareTestContainer(t, "5.0.10") defer cleanup() // The docker test method PrepareTestContainer defaults to a database "test" @@ -386,8 +383,6 @@ func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.Cer } var cmpClientOptionsOpts = cmp.Options{ - cmpopts.IgnoreTypes(http.Transport{}), - cmp.AllowUnexported(options.ClientOptions{}), cmp.AllowUnexported(tls.Config{}), diff --git a/plugins/database/mongodb/util.go b/plugins/database/mongodb/util.go index be5842136bb63..a12828f503b8c 100644 --- a/plugins/database/mongodb/util.go +++ b/plugins/database/mongodb/util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mongodb import "go.mongodb.org/mongo-driver/mongo/writeconcern" diff --git a/plugins/database/mssql/mssql-database-plugin/main.go b/plugins/database/mssql/mssql-database-plugin/main.go index 2a57b5746ec61..37a81a660012f 100644 --- a/plugins/database/mssql/mssql-database-plugin/main.go +++ b/plugins/database/mssql/mssql-database-plugin/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( diff --git a/plugins/database/mssql/mssql.go b/plugins/database/mssql/mssql.go index 7c7a4c27b3b7c..7915732b4d00a 100644 --- a/plugins/database/mssql/mssql.go +++ b/plugins/database/mssql/mssql.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mssql import ( diff --git a/plugins/database/mssql/mssql_test.go b/plugins/database/mssql/mssql_test.go index 385c5f0b69ff0..2292490d88a7a 100644 --- a/plugins/database/mssql/mssql_test.go +++ b/plugins/database/mssql/mssql_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package mssql import ( diff --git a/plugins/database/mysql/connection_producer.go b/plugins/database/mysql/connection_producer.go index 5c59792131660..208debe519de5 100644 --- a/plugins/database/mysql/connection_producer.go +++ b/plugins/database/mysql/connection_producer.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mysql import ( diff --git a/plugins/database/mysql/connection_producer_test.go b/plugins/database/mysql/connection_producer_test.go index a3f0bc7ef56e0..eacf18fabe8c8 100644 --- a/plugins/database/mysql/connection_producer_test.go +++ b/plugins/database/mysql/connection_producer_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mysql import ( diff --git a/plugins/database/mysql/mysql-database-plugin/main.go b/plugins/database/mysql/mysql-database-plugin/main.go index 56640b2f7b13f..6b1505aff194c 100644 --- a/plugins/database/mysql/mysql-database-plugin/main.go +++ b/plugins/database/mysql/mysql-database-plugin/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( diff --git a/plugins/database/mysql/mysql-legacy-database-plugin/main.go b/plugins/database/mysql/mysql-legacy-database-plugin/main.go index 8aeba0b36bdc4..ea6b9839a77e3 100644 --- a/plugins/database/mysql/mysql-legacy-database-plugin/main.go +++ b/plugins/database/mysql/mysql-legacy-database-plugin/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( diff --git a/plugins/database/mysql/mysql.go b/plugins/database/mysql/mysql.go index 0260ec20d2164..db47c71dd3108 100644 --- a/plugins/database/mysql/mysql.go +++ b/plugins/database/mysql/mysql.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package mysql import ( diff --git a/plugins/database/mysql/mysql_test.go b/plugins/database/mysql/mysql_test.go index 07e0165ffe490..3c7eab5af3576 100644 --- a/plugins/database/mysql/mysql_test.go +++ b/plugins/database/mysql/mysql_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mysql import ( diff --git a/plugins/database/postgresql/passwordauthentication.go b/plugins/database/postgresql/passwordauthentication.go deleted file mode 100644 index ec94bafba1180..0000000000000 --- a/plugins/database/postgresql/passwordauthentication.go +++ /dev/null @@ -1,25 +0,0 @@ -package postgresql - -import "fmt" - -// passwordAuthentication determines whether to send passwords in plaintext (password) or hashed (scram-sha-256). -type passwordAuthentication string - -var ( - // passwordAuthenticationPassword is the default. If set, passwords will be sent to PostgreSQL in plain text. - passwordAuthenticationPassword passwordAuthentication = "password" - passwordAuthenticationSCRAMSHA256 passwordAuthentication = "scram-sha-256" -) - -var passwordAuthentications = map[passwordAuthentication]struct{}{ - passwordAuthenticationSCRAMSHA256: {}, - passwordAuthenticationPassword: {}, -} - -func parsePasswordAuthentication(s string) (passwordAuthentication, error) { - if _, ok := passwordAuthentications[passwordAuthentication(s)]; !ok { - return "", fmt.Errorf("'%s' is not a valid password authentication type", s) - } - - return passwordAuthentication(s), nil -} diff --git a/plugins/database/postgresql/postgresql-database-plugin/main.go b/plugins/database/postgresql/postgresql-database-plugin/main.go index f543167d4a3be..75b5fd9babb2a 100644 --- a/plugins/database/postgresql/postgresql-database-plugin/main.go +++ b/plugins/database/postgresql/postgresql-database-plugin/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package main import ( diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go index 66c44cc34a08c..c76558350586d 100644 --- a/plugins/database/postgresql/postgresql.go +++ b/plugins/database/postgresql/postgresql.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package postgresql import ( @@ -12,7 +9,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/plugins/database/postgresql/scram" "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" @@ -69,8 +65,7 @@ func new() *PostgreSQL { connProducer.Type = postgreSQLTypeName db := &PostgreSQL{ - SQLConnectionProducer: connProducer, - passwordAuthentication: passwordAuthenticationPassword, + SQLConnectionProducer: connProducer, } return db @@ -79,8 +74,7 @@ func new() *PostgreSQL { type PostgreSQL struct { *connutil.SQLConnectionProducer - usernameProducer template.StringTemplate - passwordAuthentication passwordAuthentication + usernameProducer template.StringTemplate } func (p *PostgreSQL) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { @@ -108,20 +102,6 @@ func (p *PostgreSQL) Initialize(ctx context.Context, req dbplugin.InitializeRequ return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err) } - passwordAuthenticationRaw, err := strutil.GetString(req.Config, "password_authentication") - if err != nil { - return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve password_authentication: %w", err) - } - - if passwordAuthenticationRaw != "" { - pwAuthentication, err := parsePasswordAuthentication(passwordAuthenticationRaw) - if err != nil { - return dbplugin.InitializeResponse{}, err - } - - p.passwordAuthentication = 
pwAuthentication - } - resp := dbplugin.InitializeResponse{ Config: newConf, } @@ -205,15 +185,6 @@ func (p *PostgreSQL) changeUserPassword(ctx context.Context, username string, ch "username": username, "password": password, } - - if p.passwordAuthentication == passwordAuthenticationSCRAMSHA256 { - hashedPassword, err := scram.Hash(password) - if err != nil { - return fmt.Errorf("unable to scram-sha256 password: %w", err) - } - m["password"] = hashedPassword - } - if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { return fmt.Errorf("failed to execute query: %w", err) } @@ -298,24 +269,15 @@ func (p *PostgreSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) ( } defer tx.Rollback() - m := map[string]string{ - "name": username, - "username": username, - "password": req.Password, - "expiration": expirationStr, - } - - if p.passwordAuthentication == passwordAuthenticationSCRAMSHA256 { - hashedPassword, err := scram.Hash(req.Password) - if err != nil { - return dbplugin.NewUserResponse{}, fmt.Errorf("unable to scram-sha256 password: %w", err) - } - m["password"] = hashedPassword - } - for _, stmt := range req.Statements.Commands { if containsMultilineStatement(stmt) { // Execute it as-is. 
+ m := map[string]string{ + "name": username, + "username": username, + "password": req.Password, + "expiration": expirationStr, + } if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, stmt); err != nil { return dbplugin.NewUserResponse{}, fmt.Errorf("failed to execute query: %w", err) } @@ -328,6 +290,12 @@ func (p *PostgreSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) ( continue } + m := map[string]string{ + "name": username, + "username": username, + "password": req.Password, + "expiration": expirationStr, + } if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { return dbplugin.NewUserResponse{}, fmt.Errorf("failed to execute query: %w", err) } @@ -370,17 +338,6 @@ func (p *PostgreSQL) customDeleteUser(ctx context.Context, username string, revo }() for _, stmt := range revocationStmts { - if containsMultilineStatement(stmt) { - // Execute it as-is. - m := map[string]string{ - "name": username, - "username": username, - } - if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, stmt); err != nil { - return err - } - continue - } for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { query = strings.TrimSpace(query) if len(query) == 0 { diff --git a/plugins/database/postgresql/postgresql_test.go b/plugins/database/postgresql/postgresql_test.go index 5e89ee912c18c..3bb1bd594de97 100644 --- a/plugins/database/postgresql/postgresql_test.go +++ b/plugins/database/postgresql/postgresql_test.go @@ -1,24 +1,19 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package postgresql import ( "context" "database/sql" "fmt" - "os" "strings" "testing" "time" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/helper/testhelpers/postgresql" "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" - "github.com/hashicorp/vault/sdk/database/helper/dbutil" - "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/template" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -94,97 +89,6 @@ func TestPostgreSQL_Initialize_ConnURLWithDSNFormat(t *testing.T) { } } -// TestPostgreSQL_PasswordAuthentication tests that the default "password_authentication" is "none", and that -// an error is returned if an invalid "password_authentication" is provided. -func TestPostgreSQL_PasswordAuthentication(t *testing.T) { - cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") - defer cleanup() - - dsnConnURL, err := dbutil.ParseURL(connURL) - assert.NoError(t, err) - db := new() - - ctx := context.Background() - - t.Run("invalid-password-authentication", func(t *testing.T) { - connectionDetails := map[string]interface{}{ - "connection_url": dsnConnURL, - "password_authentication": "invalid-password-authentication", - } - - req := dbplugin.InitializeRequest{ - Config: connectionDetails, - VerifyConnection: true, - } - - _, err := db.Initialize(ctx, req) - assert.EqualError(t, err, "'invalid-password-authentication' is not a valid password authentication type") - }) - - t.Run("default-is-none", func(t *testing.T) { - connectionDetails := map[string]interface{}{ - "connection_url": dsnConnURL, - } - - req := dbplugin.InitializeRequest{ - Config: connectionDetails, - VerifyConnection: true, - } - - _ = dbtesting.AssertInitialize(t, db, req) - assert.Equal(t, passwordAuthenticationPassword, db.passwordAuthentication) - }) -} - -// 
TestPostgreSQL_PasswordAuthentication_SCRAMSHA256 tests that password_authentication works when set to scram-sha-256. -// When sending an encrypted password, the raw password should still successfully authenticate the user. -func TestPostgreSQL_PasswordAuthentication_SCRAMSHA256(t *testing.T) { - cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") - defer cleanup() - - dsnConnURL, err := dbutil.ParseURL(connURL) - if err != nil { - t.Fatal(err) - } - - connectionDetails := map[string]interface{}{ - "connection_url": dsnConnURL, - "password_authentication": string(passwordAuthenticationSCRAMSHA256), - } - - req := dbplugin.InitializeRequest{ - Config: connectionDetails, - VerifyConnection: true, - } - - db := new() - resp := dbtesting.AssertInitialize(t, db, req) - assert.Equal(t, string(passwordAuthenticationSCRAMSHA256), resp.Config["password_authentication"]) - - if !db.Initialized { - t.Fatal("Database should be initialized") - } - - ctx := context.Background() - newUserRequest := dbplugin.NewUserRequest{ - Statements: dbplugin.Statements{ - Commands: []string{ - ` - CREATE ROLE "{{name}}" WITH - LOGIN - PASSWORD '{{password}}' - VALID UNTIL '{{expiration}}'; - GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";`, - }, - }, - Password: "somesecurepassword", - Expiration: time.Now().Add(1 * time.Minute), - } - newUserResponse, err := db.NewUser(ctx, newUserRequest) - - assertCredsExist(t, db.ConnectionURL, newUserResponse.Username, newUserRequest.Password) -} - func TestPostgreSQL_NewUser(t *testing.T) { type testCase struct { req dbplugin.NewUserRequest @@ -683,19 +587,6 @@ func TestDeleteUser(t *testing.T) { // Wait for a short time before checking because postgres takes a moment to finish deleting the user credsAssertion: assertCredsExistAfter(100 * time.Millisecond), }, - "multiline": { - revokeStmts: []string{` - DO $$ BEGIN - REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{username}}"; - REVOKE ALL PRIVILEGES ON 
ALL SEQUENCES IN SCHEMA public FROM "{{username}}"; - REVOKE USAGE ON SCHEMA public FROM "{{username}}"; - DROP ROLE IF EXISTS "{{username}}"; - END $$; - `}, - expectErr: false, - // Wait for a short time before checking because postgres takes a moment to finish deleting the user - credsAssertion: waitUntilCredsDoNotExist(2 * time.Second), - }, } // Shared test container for speed - there should not be any overlap between the tests @@ -1099,142 +990,3 @@ func TestNewUser_CustomUsername(t *testing.T) { }) } } - -// This is a long-running integration test which tests the functionality of Postgres's multi-host -// connection strings. It uses two Postgres containers preconfigured with Replication Manager -// provided by Bitnami. This test currently does not run in CI and must be run manually. This is -// due to the test length, as it requires multiple sleep calls to ensure cluster setup and -// primary node failover occurs before the test steps continue. -// -// To run the test, set the environment variable POSTGRES_MULTIHOST_NET to the value of -// a docker network you've preconfigured, e.g. 
-// 'docker network create -d bridge postgres-repmgr' -// 'export POSTGRES_MULTIHOST_NET=postgres-repmgr' -func TestPostgreSQL_Repmgr(t *testing.T) { - _, exists := os.LookupEnv("POSTGRES_MULTIHOST_NET") - if !exists { - t.Skipf("POSTGRES_MULTIHOST_NET not set, skipping test") - } - - // Run two postgres-repmgr containers in a replication cluster - db0, runner0, url0, container0 := testPostgreSQL_Repmgr_Container(t, "psql-repl-node-0") - _, _, url1, _ := testPostgreSQL_Repmgr_Container(t, "psql-repl-node-1") - - ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) - defer cancel() - - time.Sleep(10 * time.Second) - - // Write a read role to the cluster - _, err := db0.NewUser(ctx, dbplugin.NewUserRequest{ - Statements: dbplugin.Statements{ - Commands: []string{ - `CREATE ROLE "ro" NOINHERIT; - GRANT SELECT ON ALL TABLES IN SCHEMA public TO "ro";`, - }, - }, - }) - if err != nil { - t.Fatalf("no error expected, got: %s", err) - } - - // Open a connection to both databases using the multihost connection string - connectionDetails := map[string]interface{}{ - "connection_url": fmt.Sprintf("postgresql://{{username}}:{{password}}@%s,%s/postgres?target_session_attrs=read-write", getHost(url0), getHost(url1)), - "username": "postgres", - "password": "secret", - } - req := dbplugin.InitializeRequest{ - Config: connectionDetails, - VerifyConnection: true, - } - - db := new() - dbtesting.AssertInitialize(t, db, req) - if !db.Initialized { - t.Fatal("Database should be initialized") - } - defer db.Close() - - // Add a user to the cluster, then stop the primary container - if err = testPostgreSQL_Repmgr_AddUser(ctx, db); err != nil { - t.Fatalf("no error expected, got: %s", err) - } - postgresql.StopContainer(t, ctx, runner0, container0) - - // Try adding a new user immediately - expect failure as the database - // cluster is still switching primaries - err = testPostgreSQL_Repmgr_AddUser(ctx, db) - if !strings.HasSuffix(err.Error(), "ValidateConnect 
failed (read only connection)") { - t.Fatalf("expected error was not received, got: %s", err) - } - - time.Sleep(20 * time.Second) - - // Try adding a new user again which should succeed after the sleep - // as the primary failover should have finished. Then, restart - // the first container which should become a secondary DB. - if err = testPostgreSQL_Repmgr_AddUser(ctx, db); err != nil { - t.Fatalf("no error expected, got: %s", err) - } - postgresql.RestartContainer(t, ctx, runner0, container0) - - time.Sleep(10 * time.Second) - - // A final new user to add, which should succeed after the secondary joins. - if err = testPostgreSQL_Repmgr_AddUser(ctx, db); err != nil { - t.Fatalf("no error expected, got: %s", err) - } - - if err := db.Close(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func testPostgreSQL_Repmgr_Container(t *testing.T, name string) (*PostgreSQL, *docker.Runner, string, string) { - envVars := []string{ - "REPMGR_NODE_NAME=" + name, - "REPMGR_NODE_NETWORK_NAME=" + name, - } - - runner, cleanup, connURL, containerID := postgresql.PrepareTestContainerRepmgr(t, name, "13.4.0", envVars) - t.Cleanup(cleanup) - - connectionDetails := map[string]interface{}{ - "connection_url": connURL, - } - req := dbplugin.InitializeRequest{ - Config: connectionDetails, - VerifyConnection: true, - } - db := new() - dbtesting.AssertInitialize(t, db, req) - if !db.Initialized { - t.Fatal("Database should be initialized") - } - - if err := db.Close(); err != nil { - t.Fatalf("err: %s", err) - } - - return db, runner, connURL, containerID -} - -func testPostgreSQL_Repmgr_AddUser(ctx context.Context, db *PostgreSQL) error { - _, err := db.NewUser(ctx, dbplugin.NewUserRequest{ - Statements: dbplugin.Statements{ - Commands: []string{ - `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}' INHERIT; - GRANT ro TO "{{name}}";`, - }, - }, - }) - - return err -} - -func getHost(url string) string { - splitCreds := strings.Split(url, "@")[1] - 
- return strings.Split(splitCreds, "/")[0] -} diff --git a/plugins/database/postgresql/scram/LICENSE b/plugins/database/postgresql/scram/LICENSE deleted file mode 100644 index cc36995f299f4..0000000000000 --- a/plugins/database/postgresql/scram/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2021 Taishi Kasuga - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/plugins/database/postgresql/scram/scram.go b/plugins/database/postgresql/scram/scram.go deleted file mode 100644 index f5c6923cef662..0000000000000 --- a/plugins/database/postgresql/scram/scram.go +++ /dev/null @@ -1,86 +0,0 @@ -package scram - -// -// @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/interfaces/libpq/fe-auth.c#L1167-L1285 -// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/interfaces/libpq/fe-auth-scram.c#L868-L905 -// @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/port/pg_strong_random.c#L66-L96 -// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/common/scram-common.c#L160-L274 -// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/common/scram-common.c#L27-L85 - -// Implementation from https://github.com/supercaracal/scram-sha-256/blob/d3c05cd927770a11c6e12de3e3a99c3446a1f78d/main.go -import ( - "crypto/hmac" - "crypto/rand" - "crypto/sha256" - "encoding/base64" - "fmt" - "io" - - "golang.org/x/crypto/pbkdf2" -) - -const ( - // @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/include/common/scram-common.h#L36-L41 - saltSize = 16 - - // @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/include/common/sha2.h#L22 - digestLen = 32 - - // @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/include/common/scram-common.h#L43-L47 - iterationCnt = 4096 -) - -var ( - clientRawKey = []byte("Client Key") - serverRawKey = []byte("Server Key") -) - -func genSalt(size int) ([]byte, error) { - salt := make([]byte, size) - if _, err := io.ReadFull(rand.Reader, salt); err != nil { - return nil, err - } - return salt, nil -} - -func encodeB64(src []byte) (dst []byte) { - dst = make([]byte, 
base64.StdEncoding.EncodedLen(len(src))) - base64.StdEncoding.Encode(dst, src) - return -} - -func getHMACSum(key, msg []byte) []byte { - h := hmac.New(sha256.New, key) - _, _ = h.Write(msg) - return h.Sum(nil) -} - -func getSHA256Sum(key []byte) []byte { - h := sha256.New() - _, _ = h.Write(key) - return h.Sum(nil) -} - -func hashPassword(rawPassword, salt []byte, iter, keyLen int) string { - digestKey := pbkdf2.Key(rawPassword, salt, iter, keyLen, sha256.New) - clientKey := getHMACSum(digestKey, clientRawKey) - storedKey := getSHA256Sum(clientKey) - serverKey := getHMACSum(digestKey, serverRawKey) - - return fmt.Sprintf("SCRAM-SHA-256$%d:%s$%s:%s", - iter, - string(encodeB64(salt)), - string(encodeB64(storedKey)), - string(encodeB64(serverKey)), - ) -} - -func Hash(password string) (string, error) { - salt, err := genSalt(saltSize) - if err != nil { - return "", err - } - - hashedPassword := hashPassword([]byte(password), salt, iterationCnt, digestLen) - return hashedPassword, nil -} diff --git a/plugins/database/postgresql/scram/scram_test.go b/plugins/database/postgresql/scram/scram_test.go deleted file mode 100644 index d2933ebbca404..0000000000000 --- a/plugins/database/postgresql/scram/scram_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package scram - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -// TestScram tests the Hash method. The hashed password string should have a SCRAM-SHA-256 prefix. 
-func TestScram(t *testing.T) { - tcs := map[string]struct { - Password string - }{ - "empty-password": {Password: ""}, - "simple-password": {Password: "password"}, - } - - for name, tc := range tcs { - t.Run(name, func(t *testing.T) { - got, err := Hash(tc.Password) - assert.NoError(t, err) - assert.True(t, strings.HasPrefix(got, "SCRAM-SHA-256$4096:")) - assert.Len(t, got, 133) - }) - } -} diff --git a/plugins/database/redshift/redshift-database-plugin/main.go b/plugins/database/redshift/redshift-database-plugin/main.go index 7fcd9b0b64879..8d2f796eeab3f 100644 --- a/plugins/database/redshift/redshift-database-plugin/main.go +++ b/plugins/database/redshift/redshift-database-plugin/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package main import ( diff --git a/plugins/database/redshift/redshift.go b/plugins/database/redshift/redshift.go index 11ce30a73f338..ce39569d4858c 100644 --- a/plugins/database/redshift/redshift.go +++ b/plugins/database/redshift/redshift.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package redshift import ( diff --git a/plugins/database/redshift/redshift_test.go b/plugins/database/redshift/redshift_test.go index af264587b6cd3..0fb70dec18957 100644 --- a/plugins/database/redshift/redshift_test.go +++ b/plugins/database/redshift/redshift_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package redshift import ( @@ -14,7 +11,6 @@ import ( "time" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/testhelpers" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" "github.com/hashicorp/vault/sdk/helper/dbtxn" @@ -45,12 +41,6 @@ var ( keyRedshiftUser = "REDSHIFT_USER" keyRedshiftPassword = "REDSHIFT_PASSWORD" - credNames = []string{ - keyRedshiftURL, - keyRedshiftUser, - keyRedshiftPassword, - } - vaultACC = "VAULT_ACC" ) @@ -80,9 +70,6 @@ func TestRedshift_Initialize(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - connURL, _, _, _, err := redshiftEnv() if err != nil { t.Fatal(err) @@ -121,9 +108,6 @@ func TestRedshift_NewUser(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - connURL, url, _, _, err := redshiftEnv() if err != nil { t.Fatal(err) @@ -174,9 +158,6 @@ func TestRedshift_NewUser_NoCreationStatement_ShouldError(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - connURL, _, _, _, err := redshiftEnv() if err != nil { t.Fatal(err) @@ -220,9 +201,6 @@ func TestRedshift_UpdateUser_Expiration(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - connURL, url, _, _, err := redshiftEnv() if err != nil { t.Fatal(err) @@ -283,9 +261,6 @@ func TestRedshift_UpdateUser_Password(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - connURL, url, _, _, err := redshiftEnv() if err != nil { t.Fatal(err) @@ -340,9 +315,6 @@ func TestRedshift_DeleteUser(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. 
- testhelpers.SkipUnlessEnvVarsSet(t, credNames) - connURL, url, _, _, err := redshiftEnv() if err != nil { t.Fatal(err) @@ -408,9 +380,6 @@ func TestRedshift_DefaultUsernameTemplate(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - connURL, url, _, _, err := redshiftEnv() if err != nil { t.Fatal(err) @@ -459,9 +428,6 @@ func TestRedshift_CustomUsernameTemplate(t *testing.T) { t.SkipNow() } - // Ensure each cred is populated. - testhelpers.SkipUnlessEnvVarsSet(t, credNames) - connURL, url, _, _, err := redshiftEnv() if err != nil { t.Fatal(err) diff --git a/scan.hcl b/scan.hcl deleted file mode 100644 index 7553139d17f34..0000000000000 --- a/scan.hcl +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -repository { - go_modules = true - osv = true - secrets { - all = true - } - dependabot { - required = true - check_config = true - } - - plugin "semgrep" { - use_git_ignore = true - exclude = ["vendor"] - config = ["tools/semgrep/ci", "p/r2c-security-audit"] - exclude_rule = ["generic.html-templates.security.unquoted-attribute-var.unquoted-attribute-var"] - } - - plugin "codeql" { - languages = ["go"] - } -} diff --git a/scripts/assetcheck.sh b/scripts/assetcheck.sh index d846dd5f9f4ca..7100f84d9faef 100755 --- a/scripts/assetcheck.sh +++ b/scripts/assetcheck.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - if [[ ! -e http/web_ui/index.html ]] then diff --git a/scripts/build.sh b/scripts/build.sh index cf990fa265c6b..390b62f87c418 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # # This script builds the application from source for multiple platforms. set -e @@ -43,20 +40,18 @@ mkdir -p bin/ echo "==> Building..." 
${GO_CMD} build \ -gcflags "${GCFLAGS}" \ - -ldflags "${LD_FLAGS} -X github.com/hashicorp/vault/version.GitCommit='${GIT_COMMIT}${GIT_DIRTY}' -X github.com/hashicorp/vault/version.BuildDate=${BUILD_DATE}" \ + -ldflags "${LD_FLAGS} -X github.com/hashicorp/vault/sdk/version.GitCommit='${GIT_COMMIT}${GIT_DIRTY}' -X github.com/hashicorp/vault/sdk/version.BuildDate=${BUILD_DATE}" \ -o "bin/vault" \ -tags "${BUILD_TAGS}" \ . # Move all the compiled things to the $GOPATH/bin OLDIFS=$IFS -IFS=: FIRST=($GOPATH) BIN_PATH=${GOBIN:-${FIRST}/bin} +IFS=: MAIN_GOPATH=($GOPATH) IFS=$OLDIFS -# Ensure the go bin folder exists -mkdir -p ${BIN_PATH} -rm -f ${BIN_PATH}/vault -cp bin/vault ${BIN_PATH} +rm -f ${MAIN_GOPATH}/bin/vault +cp bin/vault ${MAIN_GOPATH}/bin/ # Done! echo diff --git a/scripts/ci-helper.sh b/scripts/ci-helper.sh index 4e33a8e8aa775..a2730f5cc11a0 100755 --- a/scripts/ci-helper.sh +++ b/scripts/ci-helper.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # The ci-helper is used to determine build metadata, build Vault binaries, # package those binaries into artifacts, and execute tests with those artifacts. 
@@ -41,7 +38,7 @@ function version_base() { return fi - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" + : "${VERSION_FILE:=$(repo_root)/sdk/version/version_base.go}" awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" } @@ -69,7 +66,7 @@ function version_pre() { return fi - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" + : "${VERSION_FILE:=$(repo_root)/sdk/version/version_base.go}" awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" } @@ -82,7 +79,7 @@ function version_metadata() { return fi - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" + : "${VERSION_FILE:=$(repo_root)/sdk/version/version_base.go}" awk '$1 == "VersionMetadata" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" } @@ -132,9 +129,9 @@ function build_ui() { mkdir -p http/web_ui popd pushd "$repo_root/ui" - yarn install + yarn install --ignore-optional npm rebuild node-sass - yarn run build + yarn --verbose run build popd } @@ -154,28 +151,28 @@ function build() { prerelease=$(version_pre) build_date=$(build_date) : "${GO_TAGS:=""}" - : "${REMOVE_SYMBOLS:=""}" + : "${KEEP_SYMBOLS:=""}" # Build our ldflags msg="--> Building Vault v$version, revision $revision, built $build_date" - # Keep the symbol and dwarf information by default - if [ -n "$REMOVE_SYMBOLS" ]; then - ldflags="-s -w " - else + # Strip the symbol and dwarf information by default + if [ -n "$KEEP_SYMBOLS" ]; then ldflags="" + else + ldflags="-s -w " fi - ldflags="${ldflags}-X github.com/hashicorp/vault/version.Version=$version -X github.com/hashicorp/vault/version.GitCommit=$revision -X github.com/hashicorp/vault/version.BuildDate=$build_date" + ldflags="${ldflags}-X github.com/hashicorp/vault/sdk/version.Version=$version -X github.com/hashicorp/vault/sdk/version.GitCommit=$revision -X github.com/hashicorp/vault/sdk/version.BuildDate=$build_date" if [ -n "$prerelease" ]; then msg="${msg}, prerelease ${prerelease}" 
- ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionPrerelease=$prerelease" + ldflags="${ldflags} -X github.com/hashicorp/vault/sdk/version.VersionPrerelease=$prerelease" fi if [ -n "$metadata" ]; then msg="${msg}, metadata ${metadata}" - ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionMetadata=$metadata" + ldflags="${ldflags} -X github.com/hashicorp/vault/sdk/version.VersionMetadata=$metadata" fi # Build vault diff --git a/scripts/coverage.sh b/scripts/coverage.sh index 7f5d49e534dea..ad80496d15787 100755 --- a/scripts/coverage.sh +++ b/scripts/coverage.sh @@ -1,7 +1,4 @@ #!/bin/sh -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # Generate test coverage statistics for Go packages. # # Works around the fact that `go test -coverprofile` currently does not work diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile index c0fddb3b05fdc..504399c3ff386 100644 --- a/scripts/cross/Dockerfile +++ b/scripts/cross/Dockerfile @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - FROM debian:buster RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ @@ -15,7 +12,7 @@ RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ libltdl-dev \ libltdl7 -RUN curl -sL https://deb.nodesource.com/setup_16.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list diff --git a/scripts/deprecations-checker.sh b/scripts/deprecations-checker.sh deleted file mode 100755 index 017149e6d7095..0000000000000 --- a/scripts/deprecations-checker.sh +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -# This script is sourced into the shell running in a Github Actions workflow. 
- -# Usage: -# To check deprecations locally using the script, follow these steps: -# From the repository root or within a package folder, execute deprecations-checker.sh -# Optionally: to only show deprecations in changed files between the current branch and -# a specific branch, pass the other branch name as an argument to the script. -# -# For example: -# ./scripts/deprecations-checker.sh (or) make deprecations -# ./scripts/deprecations-checker.sh main (or) make ci-deprecations -# -# If no branch name is specified, the command will show all usage of deprecations in the code. -# -# GitHub Actions runs this against the PR's base ref branch. - -# Staticcheck uses static analysis to finds bugs and performance issues, offers simplifications, -# and enforces style rules. -# Here, it is used to check if a deprecated function, variable, constant or field is used. - -# Run staticcheck -echo "Performing deprecations check: running staticcheck" - -# Identify repository name -if [ -z $2 ]; then - # local repository name - repositoryName=$(basename `git rev-parse --show-toplevel`) -else - # github repository name from deprecated-functions-checker.yml - repositoryName=$2 -fi - -# Modify the command with the correct build tag based on repository -if [ $repositoryName == "vault-enterprise" ]; then - staticcheckCommand=$(echo "staticcheck ./... 
-tags=enterprise") -else - staticcheckCommand=$(echo "staticcheck ./...") -fi - -# If no compare branch name is specified, output all deprecations -# Else only output the deprecations from the changes added -if [ -z $1 ] - then - $staticcheckCommand | grep deprecated - else - # GitHub Actions will use this to find only changes wrt PR's base ref branch - # revgrep CLI tool will return an exit status of 1 if any issues match, else it will return 0 - $staticcheckCommand | grep deprecated 2>&1 | revgrep "$(git merge-base HEAD "origin/$1")" -fi diff --git a/scripts/deps_upgrade.py b/scripts/deps_upgrade.py index edd1b52f10398..9531696cee565 100644 --- a/scripts/deps_upgrade.py +++ b/scripts/deps_upgrade.py @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - import os import sys diff --git a/scripts/dist.sh b/scripts/dist.sh index fc605d4fdd994..e9891b059e1c9 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e # Get the version from the command line diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index ceb6ec6ee73dd..6208badf4bc89 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # Multi-stage builder to avoid polluting users environment with wrong # architecture binaries. 
ARG VERSION diff --git a/scripts/docker/Dockerfile.ui b/scripts/docker/Dockerfile.ui index cac96929b43dd..b13f0fe1fbd4e 100644 --- a/scripts/docker/Dockerfile.ui +++ b/scripts/docker/Dockerfile.ui @@ -19,7 +19,7 @@ RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ libltdl-dev \ libltdl7 -RUN curl -sL https://deb.nodesource.com/setup_16.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list diff --git a/scripts/docker/docker-entrypoint.sh b/scripts/docker/docker-entrypoint.sh index 2b9b8f35a1606..3b72da25b7f41 100755 --- a/scripts/docker/docker-entrypoint.sh +++ b/scripts/docker/docker-entrypoint.sh @@ -1,7 +1,4 @@ #!/usr/bin/dumb-init /bin/sh -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e # Note above that we run dumb-init as PID 1 in order to reap zombie processes diff --git a/scripts/gen_openapi.sh b/scripts/gen_openapi.sh index de4589877d362..e98633b295e5b 100755 --- a/scripts/gen_openapi.sh +++ b/scripts/gen_openapi.sh @@ -1,7 +1,4 @@ #!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e @@ -24,80 +21,70 @@ then fi vault server -dev -dev-root-token-id=root & -sleep 5 +sleep 2 VAULT_PID=$! -defer_stop_vault() { - echo "Stopping Vault..." - kill $VAULT_PID - sleep 1 -} - -trap defer_stop_vault INT TERM EXIT - -export VAULT_ADDR=http://127.0.0.1:8200 - -echo "Mounting all builtin plugins..." +echo "Mounting all builtin backends..." 
+ +# Read auth backends +codeLinesStarted=false +inQuotesRegex='".*"' +while read -r line; do + if [[ $line == *"credentialBackends:"* ]] ; then + codeLinesStarted=true + elif [ $codeLinesStarted = true ] && [[ $line = *"}"* ]] ; then + break + elif [ $codeLinesStarted = true ] && [[ $line =~ $inQuotesRegex ]] && [[ $line != *"Deprecated"* ]] ; then + backend=${BASH_REMATCH[0]} + plugin=$(sed -e 's/^"//' -e 's/"$//' <<<"$backend") + vault auth enable "${plugin}" + fi +done <../../vault/helper/builtinplugins/registry.go + +# Read secrets backends +codeLinesStarted=false +while read -r line; do + if [[ $line == *"logicalBackends:"* ]] ; then + codeLinesStarted=true + elif [ $codeLinesStarted = true ] && [[ $line = *"}"* ]] ; then + break + elif [ $codeLinesStarted = true ] && [[ $line =~ $inQuotesRegex ]] && [[ $line != *"Deprecated"* ]] ; then + backend=${BASH_REMATCH[0]} + plugin=$(sed -e 's/^"//' -e 's/"$//' <<<"$backend") + vault secrets enable "${plugin}" + fi +done <../../vault/helper/builtinplugins/registry.go -# Enable auth plugins -vault auth enable "alicloud" -vault auth enable "approle" -vault auth enable "aws" -vault auth enable "azure" -vault auth enable "centrify" -vault auth enable "cert" -vault auth enable "cf" -vault auth enable "gcp" -vault auth enable "github" -vault auth enable "jwt" -vault auth enable "kerberos" -vault auth enable "kubernetes" -vault auth enable "ldap" -vault auth enable "oci" -vault auth enable "okta" -vault auth enable "radius" -vault auth enable "userpass" - -# Enable secrets plugins -vault secrets enable "alicloud" -vault secrets enable "aws" -vault secrets enable "azure" -vault secrets enable "consul" -vault secrets enable "database" -vault secrets enable "gcp" -vault secrets enable "gcpkms" -vault secrets enable "kubernetes" -vault secrets enable "kv" -vault secrets enable "ldap" -vault secrets enable "mongodbatlas" -vault secrets enable "nomad" -vault secrets enable "pki" -vault secrets enable "rabbitmq" -vault secrets 
enable "ssh" -vault secrets enable "terraform" -vault secrets enable "totp" -vault secrets enable "transit" # Enable enterprise features -if [[ -n "${VAULT_LICENSE:-}" ]]; then - vault write sys/license text="${VAULT_LICENSE}" - - vault secrets enable "keymgmt" - vault secrets enable "kmip" - vault secrets enable "transform" +entRegFile=../../vault/helper/builtinplugins/registry_util_ent.go +if [ -f $entRegFile ] && [[ -n "$VAULT_LICENSE" ]]; then + vault write sys/license text="$VAULT_LICENSE" + + inQuotesRegex='".*"' + codeLinesStarted=false + while read -r line; do + if [[ $line == *"ExternalPluginsEnt"* ]] ; then + codeLinesStarted=true + elif [ $codeLinesStarted = true ] && [[ $line = *"}"* ]] ; then + break + elif [ $codeLinesStarted = true ] && [[ $line =~ $inQuotesRegex ]] && [[ $line != *"Deprecated"* ]] ; then + backend=${BASH_REMATCH[0]} + plugin=$(sed -e 's/^"//' -e 's/"$//' <<<"$backend") + vault secrets enable "${plugin}" + fi + done <$entRegFile fi # Output OpenAPI, optionally formatted if [ "$1" == "-p" ]; then - curl --header 'X-Vault-Token: root' \ - --data '{"generic_mount_paths": true}' \ - 'http://127.0.0.1:8200/v1/sys/internal/specs/openapi' | jq > openapi.json + curl -H "X-Vault-Token: root" "http://127.0.0.1:8200/v1/sys/internal/specs/openapi" | jq > openapi.json else - curl --header 'X-Vault-Token: root' \ - --data '{"generic_mount_paths": true}' \ - 'http://127.0.0.1:8200/v1/sys/internal/specs/openapi' > openapi.json + curl -H "X-Vault-Token: root" "http://127.0.0.1:8200/v1/sys/internal/specs/openapi" > openapi.json fi +kill $VAULT_PID +sleep 1 + echo echo "openapi.json generated" -echo diff --git a/scripts/gofmtcheck.sh b/scripts/gofmtcheck.sh index 5c58f178558b9..574f4d7167c94 100755 --- a/scripts/gofmtcheck.sh +++ b/scripts/gofmtcheck.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - echo "==> Checking that code complies with gofmt requirements..." 
diff --git a/scripts/goversioncheck.sh b/scripts/goversioncheck.sh index 7ee7422581e5e..6f55260099f2e 100755 --- a/scripts/goversioncheck.sh +++ b/scripts/goversioncheck.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - GO_CMD=${GO_CMD:-go} diff --git a/scripts/protocversioncheck.sh b/scripts/protocversioncheck.sh index a2cbc6cc3f279..4b081674806b9 100755 --- a/scripts/protocversioncheck.sh +++ b/scripts/protocversioncheck.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -euo pipefail diff --git a/scripts/semgrep_plugin_repos.sh b/scripts/semgrep_plugin_repos.sh index 6dc7407320caa..41f6dfd7a158a 100755 --- a/scripts/semgrep_plugin_repos.sh +++ b/scripts/semgrep_plugin_repos.sh @@ -1,7 +1,4 @@ #!/bin/sh -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e set -x diff --git a/scripts/testciphers.sh b/scripts/testciphers.sh index f9684f570bba9..324d6bce7e02a 100755 --- a/scripts/testciphers.sh +++ b/scripts/testciphers.sh @@ -1,7 +1,4 @@ #!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - # Adapted from https://superuser.com/a/224263 diff --git a/scripts/update_deps.sh b/scripts/update_deps.sh index f491b7e69bc40..35f0fecdf1b57 100755 --- a/scripts/update_deps.sh +++ b/scripts/update_deps.sh @@ -1,7 +1,4 @@ #!/bin/sh -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/scripts/update_plugin_modules.sh b/scripts/update_plugin_modules.sh index 2a300f3bc8461..ae87fd8d6a5bf 100755 --- a/scripts/update_plugin_modules.sh +++ b/scripts/update_plugin_modules.sh @@ -1,7 +1,4 @@ #!/bin/sh -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - set -e diff --git a/scripts/windows/build.bat b/scripts/windows/build.bat index 79e27605a72fd..1452911e514a7 100644 --- a/scripts/windows/build.bat +++ b/scripts/windows/build.bat @@ -63,7 +63,7 @@ del /f "%_GO_ENV_TMP_FILE%" 2>nul REM Build! echo ==^> Building... go build^ - -ldflags "-X github.com/hashicorp/vault/version.GitCommit=%_GIT_COMMIT%%_GIT_DIRTY% -X github.com/hashicorp/vault/version.BuildDate=%_BUILD_DATE%"^ + -ldflags "-X github.com/hashicorp/vault/sdk/version.GitCommit=%_GIT_COMMIT%%_GIT_DIRTY% -X github.com/hashicorp/vault/sdk/version.BuildDate=%_BUILD_DATE%"^ -o "bin/vault.exe"^ . diff --git a/sdk/database/dbplugin/client.go b/sdk/database/dbplugin/client.go index 265b46b6108a5..c30c86d0c9109 100644 --- a/sdk/database/dbplugin/client.go +++ b/sdk/database/dbplugin/client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/database.pb.go b/sdk/database/dbplugin/database.pb.go index 62964c7d15d62..c916c861e27fa 100644 --- a/sdk/database/dbplugin/database.pb.go +++ b/sdk/database/dbplugin/database.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: sdk/database/dbplugin/database.proto package dbplugin diff --git a/sdk/database/dbplugin/database.proto b/sdk/database/dbplugin/database.proto index ed29252789898..d8c208099b368 100644 --- a/sdk/database/dbplugin/database.proto +++ b/sdk/database/dbplugin/database.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin"; diff --git a/sdk/database/dbplugin/databasemiddleware.go b/sdk/database/dbplugin/databasemiddleware.go index d7cabafefe29b..29c806113844b 100644 --- a/sdk/database/dbplugin/databasemiddleware.go +++ b/sdk/database/dbplugin/databasemiddleware.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/grpc_transport.go b/sdk/database/dbplugin/grpc_transport.go index 3740ef59c3b81..fbae626df3973 100644 --- a/sdk/database/dbplugin/grpc_transport.go +++ b/sdk/database/dbplugin/grpc_transport.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/plugin.go b/sdk/database/dbplugin/plugin.go index 0b01454123c82..29f2f1f898b89 100644 --- a/sdk/database/dbplugin/plugin.go +++ b/sdk/database/dbplugin/plugin.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/server.go b/sdk/database/dbplugin/server.go index bf96a3bba4d91..4949384baf565 100644 --- a/sdk/database/dbplugin/server.go +++ b/sdk/database/dbplugin/server.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/conversions_test.go b/sdk/database/dbplugin/v5/conversions_test.go index 5e65c3467068b..6207f0f39f7f2 100644 --- a/sdk/database/dbplugin/v5/conversions_test.go +++ b/sdk/database/dbplugin/v5/conversions_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( @@ -65,7 +62,6 @@ func TestConversionsHaveAllFields(t *testing.T) { CredentialType: CredentialTypeRSAPrivateKey, PublicKey: []byte("-----BEGIN PUBLIC KEY-----"), Password: "password", - Subject: "subject", Expiration: time.Now(), } diff --git a/sdk/database/dbplugin/v5/database.go b/sdk/database/dbplugin/v5/database.go index 065aaefd5a246..b73bd6858dca7 100644 --- a/sdk/database/dbplugin/v5/database.go +++ b/sdk/database/dbplugin/v5/database.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( @@ -123,10 +120,6 @@ type NewUserRequest struct { // The value is set when the credential type is CredentialTypeRSAPrivateKey. PublicKey []byte - // Subject is the distinguished name for the client certificate credential. - // Value is set when the credential type is CredentialTypeClientCertificate. - Subject string - // Expiration of the user. Not all database plugins will support this. Expiration time.Time } @@ -150,7 +143,6 @@ type CredentialType int const ( CredentialTypePassword CredentialType = iota CredentialTypeRSAPrivateKey - CredentialTypeClientCertificate ) func (k CredentialType) String() string { @@ -159,8 +151,6 @@ func (k CredentialType) String() string { return "password" case CredentialTypeRSAPrivateKey: return "rsa_private_key" - case CredentialTypeClientCertificate: - return "client_certificate" default: return "unknown" } diff --git a/sdk/database/dbplugin/v5/grpc_client.go b/sdk/database/dbplugin/v5/grpc_client.go index 9b0b984f42af1..cfddfcd578efa 100644 --- a/sdk/database/dbplugin/v5/grpc_client.go +++ b/sdk/database/dbplugin/v5/grpc_client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( @@ -104,10 +101,6 @@ func newUserReqToProto(req NewUserRequest) (*proto.NewUserRequest, error) { if len(req.PublicKey) == 0 { return nil, fmt.Errorf("missing public key credential") } - case CredentialTypeClientCertificate: - if req.Subject == "" { - return nil, fmt.Errorf("missing certificate subject") - } default: return nil, fmt.Errorf("unknown credential type") } @@ -125,7 +118,6 @@ func newUserReqToProto(req NewUserRequest) (*proto.NewUserRequest, error) { CredentialType: int32(req.CredentialType), Password: req.Password, PublicKey: req.PublicKey, - Subject: req.Subject, Expiration: expiration, Statements: &proto.Statements{ Commands: req.Statements.Commands, diff --git a/sdk/database/dbplugin/v5/grpc_client_test.go b/sdk/database/dbplugin/v5/grpc_client_test.go index 05ecb960e60a3..b187d736d80ed 100644 --- a/sdk/database/dbplugin/v5/grpc_client_test.go +++ b/sdk/database/dbplugin/v5/grpc_client_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/grpc_database_plugin.go b/sdk/database/dbplugin/v5/grpc_database_plugin.go index b428d4ce06eff..441030df93e0b 100644 --- a/sdk/database/dbplugin/v5/grpc_database_plugin.go +++ b/sdk/database/dbplugin/v5/grpc_database_plugin.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/grpc_server.go b/sdk/database/dbplugin/v5/grpc_server.go index 7e1bc3fa1fc76..ce3be1efb7c68 100644 --- a/sdk/database/dbplugin/v5/grpc_server.go +++ b/sdk/database/dbplugin/v5/grpc_server.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( @@ -152,7 +149,6 @@ func (g *gRPCServer) NewUser(ctx context.Context, req *proto.NewUserRequest) (*p CredentialType: CredentialType(req.GetCredentialType()), Password: req.GetPassword(), PublicKey: req.GetPublicKey(), - Subject: req.GetSubject(), Expiration: expiration, Statements: getStatementsFromProto(req.GetStatements()), RollbackStatements: getStatementsFromProto(req.GetRollbackStatements()), diff --git a/sdk/database/dbplugin/v5/grpc_server_test.go b/sdk/database/dbplugin/v5/grpc_server_test.go index 53d44c7c2a656..7399bf55789bc 100644 --- a/sdk/database/dbplugin/v5/grpc_server_test.go +++ b/sdk/database/dbplugin/v5/grpc_server_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/marshalling.go b/sdk/database/dbplugin/v5/marshalling.go index 2b3e8cb346acf..e14a21e58335d 100644 --- a/sdk/database/dbplugin/v5/marshalling.go +++ b/sdk/database/dbplugin/v5/marshalling.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/middleware.go b/sdk/database/dbplugin/v5/middleware.go index 2091e672084a6..240d64e6915ea 100644 --- a/sdk/database/dbplugin/v5/middleware.go +++ b/sdk/database/dbplugin/v5/middleware.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/middleware_test.go b/sdk/database/dbplugin/v5/middleware_test.go index a2a76336fb7d0..5dd97cdb9e5a4 100644 --- a/sdk/database/dbplugin/v5/middleware_test.go +++ b/sdk/database/dbplugin/v5/middleware_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/plugin_client.go b/sdk/database/dbplugin/v5/plugin_client.go index b4085ead6cb9c..caea00a8fdaf1 100644 --- a/sdk/database/dbplugin/v5/plugin_client.go +++ b/sdk/database/dbplugin/v5/plugin_client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/plugin_client_test.go b/sdk/database/dbplugin/v5/plugin_client_test.go index 10f02b7bec259..f41ecf8ef4ab0 100644 --- a/sdk/database/dbplugin/v5/plugin_client_test.go +++ b/sdk/database/dbplugin/v5/plugin_client_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( @@ -138,10 +135,6 @@ type mockRunnerUtil struct { mock.Mock } -func (m *mockRunnerUtil) VaultVersion(ctx context.Context) (string, error) { - return "dummyversion", nil -} - func (m *mockRunnerUtil) NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) { args := m.Called(ctx, config) return args.Get(0).(pluginutil.PluginClient), args.Error(1) diff --git a/sdk/database/dbplugin/v5/plugin_factory.go b/sdk/database/dbplugin/v5/plugin_factory.go index 4b158c319e264..f68cc5621aa41 100644 --- a/sdk/database/dbplugin/v5/plugin_factory.go +++ b/sdk/database/dbplugin/v5/plugin_factory.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( @@ -58,7 +55,6 @@ func PluginFactoryVersion(ctx context.Context, pluginName string, pluginVersion Logger: namedLogger, IsMetadataMode: false, AutoMTLS: true, - Wrapper: sys, } // create a DatabasePluginClient instance db, err = NewPluginClient(ctx, sys, config) diff --git a/sdk/database/dbplugin/v5/plugin_server.go b/sdk/database/dbplugin/v5/plugin_server.go index 216219df1d7fc..090894ae55217 100644 --- a/sdk/database/dbplugin/v5/plugin_server.go +++ b/sdk/database/dbplugin/v5/plugin_server.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/proto/database.pb.go b/sdk/database/dbplugin/v5/proto/database.pb.go index f152acaa79fd8..f814c60bcec1d 100644 --- a/sdk/database/dbplugin/v5/proto/database.pb.go +++ b/sdk/database/dbplugin/v5/proto/database.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: sdk/database/dbplugin/v5/proto/database.proto package proto @@ -142,7 +139,6 @@ type NewUserRequest struct { RollbackStatements *Statements `protobuf:"bytes,5,opt,name=rollback_statements,json=rollbackStatements,proto3" json:"rollback_statements,omitempty"` CredentialType int32 `protobuf:"varint,6,opt,name=credential_type,json=credentialType,proto3" json:"credential_type,omitempty"` PublicKey []byte `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` - Subject string `protobuf:"bytes,8,opt,name=subject,proto3" json:"subject,omitempty"` } func (x *NewUserRequest) Reset() { @@ -226,13 +222,6 @@ func (x *NewUserRequest) GetPublicKey() []byte { return nil } -func (x *NewUserRequest) GetSubject() string { - if x != nil { - return x.Subject - } - return "" -} - type UsernameConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -877,7 +866,7 @@ var file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc = []byte{ 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x93, 0x03, 0x0a, 0x0e, 0x4e, 0x65, 0x77, 0x55, + 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0xf9, 0x02, 0x0a, 0x0e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, @@ -901,102 +890,100 @@ var file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x72, 0x65, 
0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x50, 0x0a, - 0x0e, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, - 0x2d, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8d, - 0x02, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x37, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, - 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x3d, 0x0a, 0x0a, 0x65, 0x78, 0x70, - 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 
0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, - 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, - 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6c, - 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x50, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x70, 0x0a, 0x0f, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, - 0x24, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x50, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x8e, - 0x01, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x69, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0x14, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, + 0x4b, 0x65, 0x79, 0x22, 0x50, 0x0a, 0x0e, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6c, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 
0x75, 0x73, 0x65, 0x72, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, + 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8d, 0x02, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, - 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, 0x28, 0x0a, 0x0a, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0xa5, 0x03, 0x0a, - 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x49, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x4e, 0x65, 0x77, 0x55, - 0x73, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, - 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, - 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, - 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x64, - 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, - 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, - 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x64, 0x62, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, - 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x64, - 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 
0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, - 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, - 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, + 0x3d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, + 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, + 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, + 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x54, 0x79, 0x70, 0x65, 0x22, 0x6c, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 
0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, + 0x77, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x22, 0x70, 0x0a, 0x0f, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, + 0x65, 0x77, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, + 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x65, 0x77, + 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, + 0x65, 0x77, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0a, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 
0x76, 0x35, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, + 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, 0x11, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0a, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, + 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x0c, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, + 0x28, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x32, 0xa5, 0x03, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, + 0x4d, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x2e, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, + 0x64, 0x62, 
0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, + 0x0a, 0x07, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, + 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, + 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x43, 0x6c, 0x6f, + 0x73, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, 
0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x64, 0x61, 0x74, + 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x76, + 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/sdk/database/dbplugin/v5/proto/database.proto b/sdk/database/dbplugin/v5/proto/database.proto index d6f877b6a549f..b4959f709e1f9 100644 --- a/sdk/database/dbplugin/v5/proto/database.proto +++ b/sdk/database/dbplugin/v5/proto/database.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; package dbplugin.v5; @@ -32,7 +29,6 @@ message NewUserRequest { Statements rollback_statements = 5; int32 credential_type = 6; bytes public_key = 7; - string subject = 8; } message UsernameConfig { diff --git a/sdk/database/dbplugin/v5/testing/test_helpers.go b/sdk/database/dbplugin/v5/testing/test_helpers.go index 83e4af3089ce3..55a402c7fe3c8 100644 --- a/sdk/database/dbplugin/v5/testing/test_helpers.go +++ b/sdk/database/dbplugin/v5/testing/test_helpers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbtesting import ( diff --git a/sdk/database/helper/connutil/connutil.go b/sdk/database/helper/connutil/connutil.go index 50582aa8196a9..1749b275a2608 100644 --- a/sdk/database/helper/connutil/connutil.go +++ b/sdk/database/helper/connutil/connutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package connutil import ( diff --git a/sdk/database/helper/connutil/sql.go b/sdk/database/helper/connutil/sql.go index d1af4808cb44f..6256ff1a4cf01 100644 --- a/sdk/database/helper/connutil/sql.go +++ b/sdk/database/helper/connutil/sql.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package connutil import ( diff --git a/sdk/database/helper/connutil/sql_test.go b/sdk/database/helper/connutil/sql_test.go index 9f29d4ae2c5ab..2ca11b7589860 100644 --- a/sdk/database/helper/connutil/sql_test.go +++ b/sdk/database/helper/connutil/sql_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package connutil import ( diff --git a/sdk/database/helper/credsutil/credsutil.go b/sdk/database/helper/credsutil/credsutil.go index 503999c868b3f..064552d1fa9fb 100644 --- a/sdk/database/helper/credsutil/credsutil.go +++ b/sdk/database/helper/credsutil/credsutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package credsutil import ( diff --git a/sdk/database/helper/credsutil/credsutil_test.go b/sdk/database/helper/credsutil/credsutil_test.go index 77e1a2862f3c2..e094719d07974 100644 --- a/sdk/database/helper/credsutil/credsutil_test.go +++ b/sdk/database/helper/credsutil/credsutil_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package credsutil import ( diff --git a/sdk/database/helper/credsutil/sql.go b/sdk/database/helper/credsutil/sql.go index 2c27adf37cc12..39fb467a79bf9 100644 --- a/sdk/database/helper/credsutil/sql.go +++ b/sdk/database/helper/credsutil/sql.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package credsutil import ( diff --git a/sdk/database/helper/credsutil/usernames.go b/sdk/database/helper/credsutil/usernames.go index 962208ac9a662..c1e3ccb5298ed 100644 --- a/sdk/database/helper/credsutil/usernames.go +++ b/sdk/database/helper/credsutil/usernames.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package credsutil import ( diff --git a/sdk/database/helper/credsutil/usernames_test.go b/sdk/database/helper/credsutil/usernames_test.go index a3e883491fc28..b1e79ce26d6ed 100644 --- a/sdk/database/helper/credsutil/usernames_test.go +++ b/sdk/database/helper/credsutil/usernames_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package credsutil import ( diff --git a/sdk/database/helper/dbutil/dbutil.go b/sdk/database/helper/dbutil/dbutil.go index efc7e01e13f02..19198bcfdddd7 100644 --- a/sdk/database/helper/dbutil/dbutil.go +++ b/sdk/database/helper/dbutil/dbutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbutil import ( diff --git a/sdk/database/helper/dbutil/dbutil_test.go b/sdk/database/helper/dbutil/dbutil_test.go index 797712b4d902a..64ca9924d390d 100644 --- a/sdk/database/helper/dbutil/dbutil_test.go +++ b/sdk/database/helper/dbutil/dbutil_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbutil import ( diff --git a/sdk/framework/backend.go b/sdk/framework/backend.go index c0527addf999d..73d8f14e60f88 100644 --- a/sdk/framework/backend.go +++ b/sdk/framework/backend.go @@ -1,13 +1,9 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package framework import ( "context" "crypto/rand" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -101,7 +97,6 @@ type Backend struct { logger log.Logger system logical.SystemView - events logical.EventSender once sync.Once pathsRe []*regexp.Regexp } @@ -133,10 +128,6 @@ type InitializeFunc func(context.Context, *logical.InitializationRequest) error // the input as defined by request handler prior to JSON marshaling type PatchPreprocessorFunc func(map[string]interface{}) (map[string]interface{}, error) -// ErrNoEvents is returned when attempting to send an event, but when the event -// sender was not passed in during `backend.Setup()`. -var ErrNoEvents = errors.New("no event sender configured") - // Initialize is the logical.Backend implementation. func (b *Backend) Initialize(ctx context.Context, req *logical.InitializationRequest) error { if b.InitializeFunc != nil { @@ -407,7 +398,6 @@ func (b *Backend) InvalidateKey(ctx context.Context, key string) { func (b *Backend) Setup(ctx context.Context, config *logical.BackendConfig) error { b.logger = config.Logger b.system = config.System - b.events = config.EventsSender return nil } @@ -549,18 +539,16 @@ func (b *Backend) handleRootHelp(req *logical.Request) (*logical.Response, error // names in the OAS document. requestResponsePrefix := req.GetString("requestResponsePrefix") - // Build OpenAPI response for the entire backend - vaultVersion := "unknown" - if b.System() != nil { - env, err := b.System().PluginEnv(context.Background()) - if err != nil { - return nil, err - } - vaultVersion = env.VaultVersion - } + // Generic mount paths will primarily be used for code generation purposes. + // This will result in dynamic mount paths being placed instead of + // hardcoded default paths. For example /auth/approle/login would be replaced + // with /auth/{mountPath}/login. This will be replaced for all secrets + // engines and auth methods that are enabled. 
+ genericMountPaths, _ := req.Get("genericMountPaths").(bool) - doc := NewOASDocument(vaultVersion) - if err := documentPaths(b, requestResponsePrefix, doc); err != nil { + // Build OpenAPI response for the entire backend + doc := NewOASDocument() + if err := documentPaths(b, requestResponsePrefix, genericMountPaths, doc); err != nil { b.Logger().Warn("error generating OpenAPI", "error", err) } @@ -692,13 +680,6 @@ func (b *Backend) handleWALRollback(ctx context.Context, req *logical.Request) ( return logical.ErrorResponse(merr.Error()), nil } -func (b *Backend) SendEvent(ctx context.Context, eventType logical.EventType, event *logical.EventData) error { - if b.events == nil { - return ErrNoEvents - } - return b.events.Send(ctx, eventType, event) -} - // FieldSchema is a basic schema to describe the format of a path field. type FieldSchema struct { Type FieldType @@ -755,8 +736,6 @@ func (t FieldType) Zero() interface{} { return "" case TypeInt: return 0 - case TypeInt64: - return int64(0) case TypeBool: return false case TypeMap: diff --git a/sdk/framework/backend_test.go b/sdk/framework/backend_test.go index 0b7a2054373d6..9a2b5941457af 100644 --- a/sdk/framework/backend_test.go +++ b/sdk/framework/backend_test.go @@ -1,11 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( "context" - "fmt" "net/http" "reflect" "strings" @@ -815,23 +811,3 @@ func TestInitializeBackend(t *testing.T) { t.Fatal("backend should be open") } } - -// TestFieldTypeMethods tries to ensure our switch-case statements for the -// FieldType "enum" are complete. 
-func TestFieldTypeMethods(t *testing.T) { - unknownFormat := convertType(TypeInvalid).format - - for i := TypeInvalid + 1; i < typeInvalidMax; i++ { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - if i.String() == TypeInvalid.String() { - t.Errorf("unknown type string for %d", i) - } - - if convertType(i).format == unknownFormat { - t.Errorf("unknown schema for %d", i) - } - - _ = i.Zero() - }) - } -} diff --git a/sdk/framework/field_data.go b/sdk/framework/field_data.go index e5f69acdb81ad..99e3fb7ab8888 100644 --- a/sdk/framework/field_data.go +++ b/sdk/framework/field_data.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( @@ -28,7 +25,7 @@ type FieldData struct { Schema map[string]*FieldSchema } -// Validate cycles through raw data and validates conversions in +// Validate cycles through raw data and validate conversions in // the schema, so we don't get an error/panic later when // trying to get data out. Data not in the schema is not // an error at this point, so we don't worry about it. @@ -56,40 +53,6 @@ func (d *FieldData) Validate() error { return nil } -// ValidateStrict cycles through raw data and validates conversions in the -// schema. In addition to the checks done by Validate, this function ensures -// that the raw data has all of the schema's required fields and does not -// have any fields outside of the schema. It will return a non-nil error if: -// -// 1. a conversion (parsing of the field's value) fails -// 2. a raw field does not exist in the schema (unless the schema is nil) -// 3. a required schema field is missing from the raw data -// -// This function is currently used for validating response schemas in tests. 
-func (d *FieldData) ValidateStrict() error { - // the schema is nil, nothing to validate - if d.Schema == nil { - return nil - } - - for field := range d.Raw { - if _, _, err := d.GetOkErr(field); err != nil { - return fmt.Errorf("field %q: %w", field, err) - } - } - - for field, schema := range d.Schema { - if !schema.Required { - continue - } - if _, ok := d.Raw[field]; !ok { - return fmt.Errorf("missing required field %q", field) - } - } - - return nil -} - // Get gets the value for the given field. If the key is an invalid field, // FieldData will panic. If you want a safer version of this method, use // GetOk. If the field k is not set, the default value (if set) will be diff --git a/sdk/framework/field_data_test.go b/sdk/framework/field_data_test.go index 078c6fbcd5b8e..d7cbd976157ad 100644 --- a/sdk/framework/field_data_test.go +++ b/sdk/framework/field_data_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( @@ -1160,118 +1157,8 @@ func TestFieldDataGetFirst(t *testing.T) { t.Fatal("should have gotten buzz for fizz") } - _, ok = data.GetFirst("cats") + result, ok = data.GetFirst("cats") if ok { t.Fatal("shouldn't have gotten anything for cats") } } - -func TestValidateStrict(t *testing.T) { - cases := map[string]struct { - Schema map[string]*FieldSchema - Raw map[string]interface{} - ExpectError bool - }{ - "string type, string value": { - map[string]*FieldSchema{ - "foo": {Type: TypeString}, - }, - map[string]interface{}{ - "foo": "bar", - }, - false, - }, - - "string type, int value": { - map[string]*FieldSchema{ - "foo": {Type: TypeString}, - }, - map[string]interface{}{ - "foo": 42, - }, - false, - }, - - "string type, unset value": { - map[string]*FieldSchema{ - "foo": {Type: TypeString}, - }, - map[string]interface{}{}, - false, - }, - - "string type, unset required value": { - map[string]*FieldSchema{ - "foo": { - Type: TypeString, - Required: true, - }, - }, - 
map[string]interface{}{}, - true, - }, - - "value not in schema": { - map[string]*FieldSchema{ - "foo": { - Type: TypeString, - Required: true, - }, - }, - map[string]interface{}{ - "foo": 42, - "bar": 43, - }, - true, - }, - - "value not in schema, empty schema": { - map[string]*FieldSchema{}, - map[string]interface{}{ - "foo": 42, - "bar": 43, - }, - true, - }, - - "value not in schema, nil schema": { - nil, - map[string]interface{}{ - "foo": 42, - "bar": 43, - }, - false, - }, - - "type time, invalid value": { - map[string]*FieldSchema{ - "foo": {Type: TypeTime}, - }, - map[string]interface{}{ - "foo": "2021-13-11T09:08:07+02:00", - }, - true, - }, - } - - for name, tc := range cases { - name, tc := name, tc - t.Run(name, func(t *testing.T) { - t.Parallel() - - data := &FieldData{ - Raw: tc.Raw, - Schema: tc.Schema, - } - - err := data.ValidateStrict() - - if err == nil && tc.ExpectError == true { - t.Fatalf("expected an error, got nil") - } - if err != nil && tc.ExpectError == false { - t.Fatalf("unexpected error: %v", err) - } - }) - } -} diff --git a/sdk/framework/field_type.go b/sdk/framework/field_type.go index ee07b6afe8664..ef7f08191e1a3 100644 --- a/sdk/framework/field_type.go +++ b/sdk/framework/field_type.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework // FieldType is the enum of types that a field can be. @@ -61,15 +58,11 @@ const ( // TypeFloat parses both float32 and float64 values TypeFloat - // TypeTime represents absolute time. It accepts an RFC3339-formatted + // TypeTime represents absolute time. It accepts an RFC3999-formatted // string (with or without fractional seconds), or an epoch timestamp // formatted as a string or a number. The resulting time.Time // is converted to UTC. TypeTime - - // DO NOT USE. Any new values must be inserted before this value. - // Used to write tests that ensure type methods handle all possible values. 
- typeInvalidMax ) func (t FieldType) String() string { @@ -82,8 +75,6 @@ func (t FieldType) String() string { return "name string" case TypeInt: return "int" - case TypeInt64: - return "int64" case TypeBool: return "bool" case TypeMap: diff --git a/sdk/framework/filter.go b/sdk/framework/filter.go index b9b99799b9169..faaccba2a864c 100644 --- a/sdk/framework/filter.go +++ b/sdk/framework/filter.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/identity.go b/sdk/framework/identity.go index 157f3c193cf42..ebb2aa4dcc6c0 100644 --- a/sdk/framework/identity.go +++ b/sdk/framework/identity.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/identity_test.go b/sdk/framework/identity_test.go index 1667fb9606360..cb71eefdf9d89 100644 --- a/sdk/framework/identity_test.go +++ b/sdk/framework/identity_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/lease.go b/sdk/framework/lease.go index 24824ca52a24b..4d0240fbe7fd8 100644 --- a/sdk/framework/lease.go +++ b/sdk/framework/lease.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/lease_test.go b/sdk/framework/lease_test.go index 5d1f9f091bbc5..e145c2a82b78b 100644 --- a/sdk/framework/lease_test.go +++ b/sdk/framework/lease_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/openapi.go b/sdk/framework/openapi.go index 663386a179d64..4d36fefbaee63 100644 --- a/sdk/framework/openapi.go +++ b/sdk/framework/openapi.go @@ -13,22 +13,21 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/wrapping" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/version" "github.com/mitchellh/mapstructure" - "golang.org/x/text/cases" - "golang.org/x/text/language" ) // OpenAPI specification (OAS): https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md const OASVersion = "3.0.2" // NewOASDocument returns an empty OpenAPI document. -func NewOASDocument(version string) *OASDocument { +func NewOASDocument() *OASDocument { return &OASDocument{ Version: OASVersion, Info: OASInfo{ Title: "HashiCorp Vault API", Description: "HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`.", - Version: version, + Version: version.GetVersion().Version, License: OASLicense{ Name: "Mozilla Public License 2.0", URL: "https://www.mozilla.org/en-US/MPL/2.0", @@ -149,7 +148,6 @@ type OASParameter struct { type OASRequestBody struct { Description string `json:"description,omitempty"` - Required bool `json:"required,omitempty"` Content OASContent `json:"content,omitempty"` } @@ -206,9 +204,9 @@ var ( ) // documentPaths parses all paths in a framework.Backend into OpenAPI paths. 
-func documentPaths(backend *Backend, requestResponsePrefix string, doc *OASDocument) error { +func documentPaths(backend *Backend, requestResponsePrefix string, genericMountPaths bool, doc *OASDocument) error { for _, p := range backend.Paths { - if err := documentPath(p, backend.SpecialPaths(), requestResponsePrefix, backend.BackendType, doc); err != nil { + if err := documentPath(p, backend.SpecialPaths(), requestResponsePrefix, genericMountPaths, backend.BackendType, doc); err != nil { return err } } @@ -217,7 +215,7 @@ func documentPaths(backend *Backend, requestResponsePrefix string, doc *OASDocum } // documentPath parses a framework.Path into one or more OpenAPI paths. -func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix string, backendType logical.BackendType, doc *OASDocument) error { +func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix string, genericMountPaths bool, backendType logical.BackendType, doc *OASDocument) error { var sudoPaths []string var unauthPaths []string @@ -244,7 +242,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st } } - for pathIndex, path := range paths { + for _, path := range paths { // Construct a top level PathItem which will be populated as the path is processed. pi := OASPathItem{ Description: cleanString(p.HelpSynopsis), @@ -252,7 +250,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st pi.Sudo = specialPathMatch(path, sudoPaths) pi.Unauthenticated = specialPathMatch(path, unauthPaths) - pi.DisplayAttrs = withoutOperationHints(p.DisplayAttrs) + pi.DisplayAttrs = p.DisplayAttrs // If the newer style Operations map isn't defined, create one from the legacy fields. operations := p.Operations @@ -271,6 +269,21 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st // Body fields will be added to individual operations. 
pathFields, bodyFields := splitFields(p.Fields, path) + if genericMountPaths && requestResponsePrefix != "system" && requestResponsePrefix != "identity" { + // Add mount path as a parameter + p := OASParameter{ + Name: "mountPath", + Description: "Path that the backend was mounted at", + In: "path", + Schema: &OASSchema{ + Type: "string", + }, + Required: true, + } + + pi.Parameters = append(pi.Parameters, p) + } + for name, field := range pathFields { location := "path" required := true @@ -294,7 +307,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st Pattern: t.pattern, Enum: field.AllowedValues, Default: field.Default, - DisplayAttrs: withoutOperationHints(field.DisplayAttrs), + DisplayAttrs: field.DisplayAttrs, }, Required: required, Deprecated: field.Deprecated, @@ -331,19 +344,9 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st op := NewOASOperation() - operationID := constructOperationID( - path, - pathIndex, - p.DisplayAttrs, - opType, - props.DisplayAttrs, - requestResponsePrefix, - ) - op.Summary = props.Summary op.Description = props.Description op.Deprecated = props.Deprecated - op.OperationID = operationID // Add any fields not present in the path as body parameters for POST. if opType == logical.CreateOperation || opType == logical.UpdateOperation { @@ -373,7 +376,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st Enum: field.AllowedValues, Default: field.Default, Deprecated: field.Deprecated, - DisplayAttrs: withoutOperationHints(field.DisplayAttrs), + DisplayAttrs: field.DisplayAttrs, } if openapiField.baseType == "array" { p.Items = &OASSchema{ @@ -395,10 +398,9 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st // Set the final request body. Only JSON request data is supported. 
if len(s.Properties) > 0 || s.Example != nil { - requestName := hyphenatedToTitleCase(operationID) + "Request" + requestName := constructRequestName(requestResponsePrefix, path) doc.Components.Schemas[requestName] = s op.RequestBody = &OASRequestBody{ - Required: true, Content: OASContent{ "application/json": &OASMediaTypeObject{ Schema: &OASSchema{Ref: fmt.Sprintf("#/components/schemas/%s", requestName)}, @@ -475,41 +477,6 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st } } } - - responseSchema := &OASSchema{ - Type: "object", - Properties: make(map[string]*OASSchema), - } - - for name, field := range resp.Fields { - openapiField := convertType(field.Type) - p := OASSchema{ - Type: openapiField.baseType, - Description: cleanString(field.Description), - Format: openapiField.format, - Pattern: openapiField.pattern, - Enum: field.AllowedValues, - Default: field.Default, - Deprecated: field.Deprecated, - DisplayAttrs: withoutOperationHints(field.DisplayAttrs), - } - if openapiField.baseType == "array" { - p.Items = &OASSchema{ - Type: openapiField.items, - } - } - responseSchema.Properties[name] = &p - } - - if len(resp.Fields) != 0 { - responseName := hyphenatedToTitleCase(operationID) + "Response" - doc.Components.Schemas[responseName] = responseSchema - content = OASContent{ - "application/json": &OASMediaTypeObject{ - Schema: &OASSchema{Ref: fmt.Sprintf("#/components/schemas/%s", responseName)}, - }, - } - } } op.Responses[code] = &OASResponse{ @@ -534,6 +501,30 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st return nil } +// constructRequestName joins the given prefix with the path elements into a +// CamelCaseRequest string. 
+// +// For example, prefix="kv" & path=/config/lease/{name} => KvConfigLeaseRequest +func constructRequestName(requestResponsePrefix string, path string) string { + var b strings.Builder + + b.WriteString(strings.Title(requestResponsePrefix)) + + // split the path by / _ - separators + for _, token := range strings.FieldsFunc(path, func(r rune) bool { + return r == '/' || r == '_' || r == '-' + }) { + // exclude request fields + if !strings.ContainsAny(token, "{}") { + b.WriteString(strings.Title(token)) + } + } + + b.WriteString("Request") + + return b.String() +} + // specialPathMatch checks whether the given path matches one of the special // paths, taking into account * and + wildcards (e.g. foo/+/bar/*) func specialPathMatch(path string, specialPaths []string) bool { @@ -586,117 +577,6 @@ func specialPathMatch(path string, specialPaths []string) bool { return false } -// constructOperationID joins the given inputs into a hyphen-separated -// lower-case operation id, which is also used as a prefix for request and -// response names. -// -// The OperationPrefix / -Verb / -Suffix found in display attributes will be -// used, if provided. Otherwise, the function falls back to using the path and -// the operation. 
-// -// Examples of generated operation identifiers: -// - kvv2-write -// - kvv2-read -// - google-cloud-login -// - google-cloud-write-role -func constructOperationID( - path string, - pathIndex int, - pathAttributes *DisplayAttributes, - operation logical.Operation, - operationAttributes *DisplayAttributes, - defaultPrefix string, -) string { - var ( - prefix string - verb string - suffix string - ) - - if operationAttributes != nil { - prefix = operationAttributes.OperationPrefix - verb = operationAttributes.OperationVerb - suffix = operationAttributes.OperationSuffix - } - - if pathAttributes != nil { - if prefix == "" { - prefix = pathAttributes.OperationPrefix - } - if verb == "" { - verb = pathAttributes.OperationVerb - } - if suffix == "" { - suffix = pathAttributes.OperationSuffix - } - } - - // A single suffix string can contain multiple pipe-delimited strings. To - // determine the actual suffix, we attempt to match it by the index of the - // paths returned from `expandPattern(...)`. For example: - // - // pki/ - // Pattern: "keys/generate/(internal|exported|kms)", - // DisplayAttrs: { - // ... 
- // OperationSuffix: "internal-key|exported-key|kms-key", - // }, - // - // will expand into three paths and corresponding suffixes: - // - // path 0: "keys/generate/internal" suffix: internal-key - // path 1: "keys/generate/exported" suffix: exported-key - // path 2: "keys/generate/kms" suffix: kms-key - // - pathIndexOutOfRange := false - - if suffixes := strings.Split(suffix, "|"); len(suffixes) > 1 || pathIndex > 0 { - // if the index is out of bounds, fall back to the old logic - if pathIndex >= len(suffixes) { - suffix = "" - pathIndexOutOfRange = true - } else { - suffix = suffixes[pathIndex] - } - } - - // a helper that hyphenates & lower-cases the slice except the empty elements - toLowerHyphenate := func(parts []string) string { - filtered := make([]string, 0, len(parts)) - for _, e := range parts { - if e != "" { - filtered = append(filtered, e) - } - } - return strings.ToLower(strings.Join(filtered, "-")) - } - - // fall back to using the path + operation to construct the operation id - var ( - needPrefix = prefix == "" && verb == "" - needVerb = verb == "" - needSuffix = suffix == "" && (verb == "" || pathIndexOutOfRange) - ) - - if needPrefix { - prefix = defaultPrefix - } - - if needVerb { - if operation == logical.UpdateOperation { - verb = "write" - } else { - verb = string(operation) - } - } - - if needSuffix { - suffix = toLowerHyphenate(nonWordRe.Split(path, -1)) - } - - return toLowerHyphenate([]string{prefix, verb, suffix}) -} - // expandPattern expands a regex pattern by generating permutations of any optional parameters // and changing named parameters into their {openapi} equivalents. 
func expandPattern(pattern string) ([]string, error) { @@ -988,40 +868,6 @@ func splitFields(allFields map[string]*FieldSchema, pattern string) (pathFields, return pathFields, bodyFields } -// withoutOperationHints returns a copy of the given DisplayAttributes without -// OperationPrefix / OperationVerb / OperationSuffix since we don't need these -// fields in the final output. -func withoutOperationHints(in *DisplayAttributes) *DisplayAttributes { - if in == nil { - return nil - } - - copy := *in - - copy.OperationPrefix = "" - copy.OperationVerb = "" - copy.OperationSuffix = "" - - // return nil if all fields are empty to avoid empty JSON objects - if copy == (DisplayAttributes{}) { - return nil - } - - return © -} - -func hyphenatedToTitleCase(in string) string { - var b strings.Builder - - title := cases.Title(language.English, cases.NoLower) - - for _, word := range strings.Split(in, "-") { - b.WriteString(title.String(word)) - } - - return b.String() -} - // cleanedResponse is identical to logical.Response but with nulls // removed from from JSON encoding type cleanedResponse struct { @@ -1056,9 +902,6 @@ func cleanResponse(resp *logical.Response) *cleanedResponse { // postSysToolsRandomUrlbytes_2 // // An optional user-provided suffix ("context") may also be appended. -// -// Deprecated: operationID's are now populated using `constructOperationID`. -// This function is here for backwards compatibility with older plugins. 
func (d *OASDocument) CreateOperationIDs(context string) { opIDCount := make(map[string]int) var paths []string @@ -1086,13 +929,6 @@ func (d *OASDocument) CreateOperationIDs(context string) { continue } - if oasOperation.OperationID != "" { - continue - } - - // Discard "_mount_path" from any {thing_mount_path} parameters - path = strings.Replace(path, "_mount_path", "", 1) - // Space-split on non-words, title case everything, recombine opID := nonWordRe.ReplaceAllString(strings.ToLower(path), " ") opID = strings.Title(opID) diff --git a/sdk/framework/openapi_test.go b/sdk/framework/openapi_test.go index 9e2763f1241e0..5f1bd46130060 100644 --- a/sdk/framework/openapi_test.go +++ b/sdk/framework/openapi_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( @@ -18,6 +15,7 @@ import ( "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/wrapping" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/version" ) func TestOpenAPI_Regex(t *testing.T) { @@ -320,7 +318,7 @@ func TestOpenAPI_SpecialPaths(t *testing.T) { } for name, test := range tests { t.Run(name, func(t *testing.T) { - doc := NewOASDocument("version") + doc := NewOASDocument() path := Path{ Pattern: test.pattern, } @@ -329,7 +327,7 @@ func TestOpenAPI_SpecialPaths(t *testing.T) { Unauthenticated: test.unauthenticatedPaths, } - if err := documentPath(&path, specialPaths, "kv", logical.TypeLogical, doc); err != nil { + if err := documentPath(&path, specialPaths, "kv", false, logical.TypeLogical, doc); err != nil { t.Fatal(err) } @@ -537,16 +535,6 @@ func TestOpenAPI_Paths(t *testing.T) { "amount": 42, }, }, - Fields: map[string]*FieldSchema{ - "field_a": { - Type: TypeString, - Description: "field_a description", - }, - "field_b": { - Type: TypeBool, - Description: "field_b description", - }, - }, }}, }, }, @@ -564,6 +552,66 @@ func TestOpenAPI_Paths(t *testing.T) { }) } +func 
TestOpenAPI_OperationID(t *testing.T) { + path1 := &Path{ + Pattern: "foo/" + GenericNameRegex("id"), + Fields: map[string]*FieldSchema{ + "id": {Type: TypeString}, + }, + Operations: map[logical.Operation]OperationHandler{ + logical.ReadOperation: &PathOperation{}, + logical.UpdateOperation: &PathOperation{}, + logical.DeleteOperation: &PathOperation{}, + }, + } + + path2 := &Path{ + Pattern: "Foo/" + GenericNameRegex("id"), + Fields: map[string]*FieldSchema{ + "id": {Type: TypeString}, + }, + Operations: map[logical.Operation]OperationHandler{ + logical.ReadOperation: &PathOperation{}, + }, + } + + for _, context := range []string{"", "bar"} { + doc := NewOASDocument() + err := documentPath(path1, nil, "kv", false, logical.TypeLogical, doc) + if err != nil { + t.Fatal(err) + } + err = documentPath(path2, nil, "kv", false, logical.TypeLogical, doc) + if err != nil { + t.Fatal(err) + } + doc.CreateOperationIDs(context) + + tests := []struct { + path string + op string + opID string + }{ + {"/Foo/{id}", "get", "getFooId"}, + {"/foo/{id}", "get", "getFooId_2"}, + {"/foo/{id}", "post", "postFooId"}, + {"/foo/{id}", "delete", "deleteFooId"}, + } + + for _, test := range tests { + actual := getPathOp(doc.Paths[test.path], test.op).OperationID + expected := test.opID + if context != "" { + expected += "_" + context + } + + if actual != expected { + t.Fatalf("expected %v, got %v", expected, actual) + } + } + } +} + func TestOpenAPI_CustomDecoder(t *testing.T) { p := &Path{ Pattern: "foo", @@ -592,8 +640,8 @@ func TestOpenAPI_CustomDecoder(t *testing.T) { }, } - docOrig := NewOASDocument("version") - err := documentPath(p, nil, "kv", logical.TypeLogical, docOrig) + docOrig := NewOASDocument() + err := documentPath(p, nil, "kv", false, logical.TypeLogical, docOrig) if err != nil { t.Fatal(err) } @@ -652,221 +700,11 @@ func TestOpenAPI_CleanResponse(t *testing.T) { } } -func TestOpenAPI_constructOperationID(t *testing.T) { - tests := map[string]struct { - path string - 
pathIndex int - pathAttributes *DisplayAttributes - operation logical.Operation - operationAttributes *DisplayAttributes - defaultPrefix string - expected string - }{ - "empty": { - path: "", - pathIndex: 0, - pathAttributes: nil, - operation: logical.Operation(""), - operationAttributes: nil, - defaultPrefix: "", - expected: "", - }, - "simple-read": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: nil, - operation: logical.ReadOperation, - operationAttributes: nil, - defaultPrefix: "test", - expected: "test-read-path-to-thing", - }, - "simple-write": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: nil, - operation: logical.UpdateOperation, - operationAttributes: nil, - defaultPrefix: "test", - expected: "test-write-path-to-thing", - }, - "operation-verb": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: &DisplayAttributes{OperationVerb: "do-something"}, - operation: logical.UpdateOperation, - operationAttributes: nil, - defaultPrefix: "test", - expected: "do-something", - }, - "operation-verb-override": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: &DisplayAttributes{OperationVerb: "do-something"}, - operation: logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationVerb: "do-something-else"}, - defaultPrefix: "test", - expected: "do-something-else", - }, - "operation-prefix": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix"}, - operation: logical.UpdateOperation, - operationAttributes: nil, - defaultPrefix: "test", - expected: "my-prefix-write-path-to-thing", - }, - "operation-prefix-override": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix"}, - operation: logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix"}, - defaultPrefix: "test", - expected: "better-prefix-write-path-to-thing", - }, - "operation-prefix-and-suffix": { - 
path: "path/to/thing", - pathIndex: 0, - pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix"}, - operation: logical.UpdateOperation, - operationAttributes: nil, - defaultPrefix: "test", - expected: "my-prefix-write-my-suffix", - }, - "operation-prefix-and-suffix-override": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix"}, - operation: logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix"}, - defaultPrefix: "test", - expected: "better-prefix-write-better-suffix", - }, - "operation-prefix-verb-suffix": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix", OperationVerb: "Create"}, - operation: logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix"}, - defaultPrefix: "test", - expected: "better-prefix-create-better-suffix", - }, - "operation-prefix-verb-suffix-override": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix", OperationVerb: "Create"}, - operation: logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix", OperationVerb: "Login"}, - defaultPrefix: "test", - expected: "better-prefix-login-better-suffix", - }, - "operation-prefix-verb": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: nil, - operation: logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationVerb: "Login"}, - defaultPrefix: "test", - expected: "better-prefix-login", - }, - "operation-verb-suffix": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: nil, - operation: 
logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationVerb: "Login", OperationSuffix: "better-suffix"}, - defaultPrefix: "test", - expected: "login-better-suffix", - }, - "pipe-delimited-suffix-0": { - path: "path/to/thing", - pathIndex: 0, - pathAttributes: nil, - operation: logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, - defaultPrefix: "test", - expected: "better-prefix-write-suffix0", - }, - "pipe-delimited-suffix-1": { - path: "path/to/thing", - pathIndex: 1, - pathAttributes: nil, - operation: logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, - defaultPrefix: "test", - expected: "better-prefix-write-suffix1", - }, - "pipe-delimited-suffix-2-fallback": { - path: "path/to/thing", - pathIndex: 2, - pathAttributes: nil, - operation: logical.UpdateOperation, - operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, - defaultPrefix: "test", - expected: "better-prefix-write-path-to-thing", - }, - } - - for name, test := range tests { - name, test := name, test - t.Run(name, func(t *testing.T) { - t.Parallel() - actual := constructOperationID( - test.path, - test.pathIndex, - test.pathAttributes, - test.operation, - test.operationAttributes, - test.defaultPrefix, - ) - if actual != test.expected { - t.Fatalf("expected: %s; got: %s", test.expected, actual) - } - }) - } -} - -func TestOpenAPI_hyphenatedToTitleCase(t *testing.T) { - tests := map[string]struct { - in string - expected string - }{ - "simple": { - in: "test", - expected: "Test", - }, - "two-words": { - in: "two-words", - expected: "TwoWords", - }, - "three-words": { - in: "one-two-three", - expected: "OneTwoThree", - }, - "not-hyphenated": { - in: "something_like_this", - expected: "Something_like_this", - }, - } - - for name, test := range tests { - 
name, test := name, test - t.Run(name, func(t *testing.T) { - t.Parallel() - actual := hyphenatedToTitleCase(test.in) - if actual != test.expected { - t.Fatalf("expected: %s; got: %s", test.expected, actual) - } - }) - } -} - func testPath(t *testing.T, path *Path, sp *logical.Paths, expectedJSON string) { t.Helper() - doc := NewOASDocument("dummyversion") - if err := documentPath(path, sp, "kv", logical.TypeLogical, doc); err != nil { + doc := NewOASDocument() + if err := documentPath(path, sp, "kv", false, logical.TypeLogical, doc); err != nil { t.Fatal(err) } doc.CreateOperationIDs("") @@ -911,7 +749,7 @@ func expected(name string) string { panic(err) } - content := strings.Replace(string(data), "", "dummyversion", 1) + content := strings.Replace(string(data), "", version.GetVersion().Version, 1) return content } diff --git a/sdk/framework/path.go b/sdk/framework/path.go index e96ba4c41bb29..8a8b1c75879d4 100644 --- a/sdk/framework/path.go +++ b/sdk/framework/path.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( @@ -59,24 +56,12 @@ type Path struct { Pattern string // Fields is the mapping of data fields to a schema describing that - // field. - // - // Field values are obtained from: - // - // - Named captures in the Pattern. - // - // - Parameters in the HTTP request body, for HTTP methods where a - // request body is expected, i.e. PUT/POST/PATCH. The request body is - // typically formatted as JSON, though - // "application/x-www-form-urlencoded" format can also be accepted. - // - // - Parameters in the HTTP URL query-string, for HTTP methods where - // there is no request body, i.e. GET/LIST/DELETE. The query-string - // is *not* parsed at all for PUT/POST/PATCH requests. + // field. Named captures in the Pattern also map to fields. If a named + // capture name matches a PUT body name, the named capture takes + // priority. 
// - // Should the same field be specified both as a named capture and as - // a parameter, the named capture takes precedence, and a warning is - // returned. + // Note that only named capture fields are available in every operation, + // whereas all fields are available in the Write operation. Fields map[string]*FieldSchema // Operations is the set of operations supported and the associated OperationsHandler. @@ -207,11 +192,6 @@ type DisplayAttributes struct { // Name is the name of the field suitable as a label or documentation heading. Name string `json:"name,omitempty"` - // Description of the field that renders as tooltip help text beside the label (name) in the UI. - // This may be used to replace descriptions that reference comma separation but correspond - // to UI inputs where only arrays are valid. For example params with Type: framework.TypeCommaStringSlice - Description string `json:"description,omitempty"` - // Value is a sample value to display for this field. This may be used // to indicate a default value, but it is for display only and completely separate // from any Default member handling. @@ -232,28 +212,6 @@ type DisplayAttributes struct { // Action is the verb to use for the operation. Action string `json:"action,omitempty"` - // OperationPrefix is a hyphenated lower-case string used to construct - // OpenAPI OperationID (prefix + verb + suffix). OperationPrefix is - // typically a human-readable name of the plugin or a prefix shared by - // multiple related endpoints. - OperationPrefix string `json:"operationPrefix,omitempty"` - - // OperationVerb is a hyphenated lower-case string used to construct - // OpenAPI OperationID (prefix + verb + suffix). OperationVerb is typically - // an action to be performed (e.g. "generate", "sign", "login", etc.). If - // not specified, the verb defaults to `logical.Operation.String()` - // (e.g. 
"read", "list", "delete", "write" for Create/Update) - OperationVerb string `json:"operationVerb,omitempty"` - - // OperationSuffix is a hyphenated lower-case string used to construct - // OpenAPI OperationID (prefix + verb + suffix). It is typically the name - // of the resource on which the action is performed (e.g. "role", - // "credentials", etc.). A pipe (|) separator can be used to list different - // suffixes for various permutations of the `Path.Pattern` regular - // expression. If not specified, the suffix defaults to the `Path.Pattern` - // split by dashes. - OperationSuffix string `json:"operationSuffix,omitempty"` - // EditType is the optional type of form field needed for a property // This is only necessary for a "textarea" or "file" EditType string `json:"editType,omitempty"` @@ -271,10 +229,9 @@ type RequestExample struct { // Response describes and optional demonstrations an operation response. type Response struct { - Description string // summary of the the response and should always be provided - MediaType string // media type of the response, defaulting to "application/json" if empty - Fields map[string]*FieldSchema // the fields present in this response, used to generate openapi response - Example *logical.Response // example response data + Description string // summary of the the response and should always be provided + MediaType string // media type of the response, defaulting to "application/json" if empty + Example *logical.Response // example response data } // PathOperation is a concrete implementation of OperationHandler. 
@@ -288,7 +245,6 @@ type PathOperation struct { Deprecated bool ForwardPerformanceSecondary bool ForwardPerformanceStandby bool - DisplayAttrs *DisplayAttributes } func (p *PathOperation) Handler() OperationFunc { @@ -305,7 +261,6 @@ func (p *PathOperation) Properties() OperationProperties { Deprecated: p.Deprecated, ForwardPerformanceSecondary: p.ForwardPerformanceSecondary, ForwardPerformanceStandby: p.ForwardPerformanceStandby, - DisplayAttrs: p.DisplayAttrs, } } @@ -361,20 +316,8 @@ func (p *Path) helpCallback(b *Backend) OperationFunc { } // Build OpenAPI response for this path - vaultVersion := "unknown" - if b.System() != nil { - // b.System() should always be non-nil, except tests might create a - // Backend without one. - env, err := b.System().PluginEnv(context.Background()) - if err != nil { - return nil, err - } - if env != nil { - vaultVersion = env.VaultVersion - } - } - doc := NewOASDocument(vaultVersion) - if err := documentPath(p, b.SpecialPaths(), requestResponsePrefix, b.BackendType, doc); err != nil { + doc := NewOASDocument() + if err := documentPath(p, b.SpecialPaths(), requestResponsePrefix, false, b.BackendType, doc); err != nil { b.Logger().Warn("error generating OpenAPI", "error", err) } diff --git a/sdk/framework/path_map.go b/sdk/framework/path_map.go index 46cf4720e96e0..0cba8ea2fb16c 100644 --- a/sdk/framework/path_map.go +++ b/sdk/framework/path_map.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/path_map_test.go b/sdk/framework/path_map_test.go index 3fe6308cbcd24..11e1f37c9d54c 100644 --- a/sdk/framework/path_map_test.go +++ b/sdk/framework/path_map_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/path_struct.go b/sdk/framework/path_struct.go index cba855065ea20..2a2848e585084 100644 --- a/sdk/framework/path_struct.go +++ b/sdk/framework/path_struct.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/path_struct_test.go b/sdk/framework/path_struct_test.go index 88662af5300da..9e81cc2e301c3 100644 --- a/sdk/framework/path_struct_test.go +++ b/sdk/framework/path_struct_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/path_test.go b/sdk/framework/path_test.go index 4541930ed591a..ca359d1f57763 100644 --- a/sdk/framework/path_test.go +++ b/sdk/framework/path_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/policy_map.go b/sdk/framework/policy_map.go index 94accf88ae74c..7befb399545cd 100644 --- a/sdk/framework/policy_map.go +++ b/sdk/framework/policy_map.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/policy_map_test.go b/sdk/framework/policy_map_test.go index b785fddc783c3..6a88b80511850 100644 --- a/sdk/framework/policy_map_test.go +++ b/sdk/framework/policy_map_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/secret.go b/sdk/framework/secret.go index 095bc12b7246a..0c8f0dfcccdf6 100644 --- a/sdk/framework/secret.go +++ b/sdk/framework/secret.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/secret_test.go b/sdk/framework/secret_test.go index 29058dc84e3af..83af4753b6d98 100644 --- a/sdk/framework/secret_test.go +++ b/sdk/framework/secret_test.go @@ -1,4 +1 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework diff --git a/sdk/framework/template.go b/sdk/framework/template.go index d395c8f8dbd5f..3abdd624c55e6 100644 --- a/sdk/framework/template.go +++ b/sdk/framework/template.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/testdata/legacy.json b/sdk/framework/testdata/legacy.json index 548151c6f9e60..cb1f7ebd3f2d6 100644 --- a/sdk/framework/testdata/legacy.json +++ b/sdk/framework/testdata/legacy.json @@ -24,11 +24,9 @@ } ], "get": { - "operationId": "kv-read-lookup-id", + "operationId": "getLookupId", "summary": "Synopsis", - "tags": [ - "secrets" - ], + "tags": ["secrets"], "responses": { "200": { "description": "OK" @@ -36,17 +34,14 @@ } }, "post": { - "operationId": "kv-write-lookup-id", + "operationId": "postLookupId", "summary": "Synopsis", - "tags": [ - "secrets" - ], + "tags": ["secrets"], "requestBody": { - "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/KvWriteLookupIdRequest" + "$ref": "#/components/schemas/KvLookupRequest" } } } @@ -61,7 +56,7 @@ }, "components": { "schemas": { - "KvWriteLookupIdRequest": { + "KvLookupRequest": { "type": "object", "properties": { "token": { @@ -73,3 +68,4 @@ } } } + diff --git a/sdk/framework/testdata/operations.json b/sdk/framework/testdata/operations.json index 7fca0e2650142..097399c02e0e8 100644 --- a/sdk/framework/testdata/operations.json +++ b/sdk/framework/testdata/operations.json @@ -37,10 +37,8 @@ } ], "get": { - "operationId": "kv-read-foo-id", - "tags": [ - "secrets" - ], + "operationId": "getFooId", + "tags": 
["secrets"], "summary": "My Summary", "description": "My Description", "responses": { @@ -60,18 +58,15 @@ ] }, "post": { - "operationId": "kv-write-foo-id", - "tags": [ - "secrets" - ], + "operationId": "postFooId", + "tags": ["secrets"], "summary": "Update Summary", "description": "Update Description", "requestBody": { - "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/KvWriteFooIdRequest" + "$ref": "#/components/schemas/KvFooRequest" } } } @@ -86,11 +81,9 @@ }, "components": { "schemas": { - "KvWriteFooIdRequest": { + "KvFooRequest": { "type": "object", - "required": [ - "age" - ], + "required": ["age"], "properties": { "flavors": { "type": "array", @@ -102,11 +95,7 @@ "age": { "type": "integer", "description": "the age", - "enum": [ - 1, - 2, - 3 - ], + "enum": [1, 2, 3], "x-vault-displayAttrs": { "name": "Age", "sensitive": true, @@ -123,13 +112,9 @@ "x-abc-token": { "type": "string", "description": "a header value", - "enum": [ - "a", - "b", - "c" - ] + "enum": ["a", "b", "c"] }, - "maximum": { + "maximum" : { "type": "integer", "description": "a maximum value", "format": "int64" diff --git a/sdk/framework/testdata/operations_list.json b/sdk/framework/testdata/operations_list.json index a08208b24fa44..e89622a3c40a1 100644 --- a/sdk/framework/testdata/operations_list.json +++ b/sdk/framework/testdata/operations_list.json @@ -36,10 +36,8 @@ } ], "get": { - "operationId": "kv-list-foo-id", - "tags": [ - "secrets" - ], + "operationId": "getFooId", + "tags": ["secrets"], "summary": "List Summary", "description": "List Description", "responses": { @@ -55,9 +53,7 @@ "in": "query", "schema": { "type": "string", - "enum": [ - "true" - ] + "enum": ["true"] } } ] @@ -65,6 +61,7 @@ } }, "components": { - "schemas": {} + "schemas": { + } } } diff --git a/sdk/framework/testdata/responses.json b/sdk/framework/testdata/responses.json index 98d501ec5e896..b0e197babed5f 100644 --- a/sdk/framework/testdata/responses.json +++ 
b/sdk/framework/testdata/responses.json @@ -14,10 +14,8 @@ "description": "Synopsis", "x-vault-unauthenticated": true, "delete": { - "operationId": "kv-delete-foo", - "tags": [ - "secrets" - ], + "operationId": "deleteFoo", + "tags": ["secrets"], "summary": "Delete stuff", "responses": { "204": { @@ -26,10 +24,8 @@ } }, "get": { - "operationId": "kv-read-foo", - "tags": [ - "secrets" - ], + "operationId": "getFoo", + "tags": ["secrets"], "summary": "My Summary", "description": "My Description", "responses": { @@ -38,7 +34,11 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/KvReadFooResponse" + "example": { + "data": { + "amount": 42 + } + } } } } @@ -49,19 +49,7 @@ }, "components": { "schemas": { - "KvReadFooResponse": { - "type": "object", - "properties": { - "field_a": { - "type": "string", - "description": "field_a description" - }, - "field_b": { - "type": "boolean", - "description": "field_b description" - } - } - } } } } + diff --git a/sdk/framework/testing.go b/sdk/framework/testing.go index d2035d676f0c8..a00a3241cf82f 100644 --- a/sdk/framework/testing.go +++ b/sdk/framework/testing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/wal.go b/sdk/framework/wal.go index b090f03e7bf33..7e7bb1afa959d 100644 --- a/sdk/framework/wal.go +++ b/sdk/framework/wal.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/framework/wal_test.go b/sdk/framework/wal_test.go index 040749239c887..958be7e79ecd9 100644 --- a/sdk/framework/wal_test.go +++ b/sdk/framework/wal_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package framework import ( diff --git a/sdk/go.mod b/sdk/go.mod index c4a2ec99fdbcb..8aaca44d4b0c8 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -3,94 +3,61 @@ module github.com/hashicorp/vault/sdk go 1.19 require ( - github.com/armon/go-metrics v0.4.1 + github.com/armon/go-metrics v0.3.9 github.com/armon/go-radix v1.0.0 - github.com/cenkalti/backoff/v3 v3.2.2 - github.com/docker/docker v23.0.4+incompatible - github.com/docker/go-connections v0.4.0 - github.com/evanphx/json-patch/v5 v5.6.0 + github.com/evanphx/json-patch/v5 v5.5.0 github.com/fatih/structs v1.1.0 - github.com/go-ldap/ldap/v3 v3.4.1 - github.com/go-test/deep v1.1.0 + github.com/go-ldap/ldap/v3 v3.1.10 + github.com/go-test/deep v1.0.2 github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 - github.com/google/tink/go v1.7.0 github.com/hashicorp/errwrap v1.1.0 - github.com/hashicorp/go-cleanhttp v0.5.2 - github.com/hashicorp/go-hclog v1.4.0 + github.com/hashicorp/go-hclog v0.16.2 github.com/hashicorp/go-immutable-radix v1.3.1 github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 - github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-plugin v1.4.8 - github.com/hashicorp/go-retryablehttp v0.7.1 - github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 - github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 + github.com/hashicorp/go-plugin v1.4.3 + github.com/hashicorp/go-secure-stdlib/base62 v0.1.1 + github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 github.com/hashicorp/go-secure-stdlib/password v0.1.1 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 github.com/hashicorp/go-sockaddr v1.0.2 - github.com/hashicorp/go-uuid v1.0.3 - github.com/hashicorp/go-version v1.6.0 + github.com/hashicorp/go-uuid v1.0.2 + 
github.com/hashicorp/go-version v1.2.0 github.com/hashicorp/golang-lru v0.5.4 - github.com/hashicorp/hcl v1.0.1-vault-5 - github.com/hashicorp/vault/api v1.9.1 - github.com/mitchellh/copystructure v1.2.0 - github.com/mitchellh/go-testing-interface v1.14.1 + github.com/hashicorp/hcl v1.0.0 + github.com/mitchellh/copystructure v1.0.0 + github.com/mitchellh/go-testing-interface v1.0.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/pierrec/lz4 v2.6.1+incompatible + github.com/pierrec/lz4 v2.5.2+incompatible github.com/ryanuber/go-glob v1.0.0 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.7.0 go.uber.org/atomic v1.9.0 golang.org/x/crypto v0.6.0 - golang.org/x/net v0.8.0 - golang.org/x/text v0.8.0 - google.golang.org/grpc v1.53.0 - google.golang.org/protobuf v1.28.1 + google.golang.org/grpc v1.41.0 + google.golang.org/protobuf v1.26.0 ) require ( - github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/containerd/containerd v1.7.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/frankban/quicktest v1.11.3 // indirect - github.com/go-asn1-ber/asn1-ber v1.5.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect - github.com/klauspost/compress v1.16.5 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.7.0 // indirect + github.com/frankban/quicktest v1.10.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.3.1 // indirect + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect github.com/kr/text v0.2.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - 
github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/patternmatcher v0.5.0 // indirect - github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect - github.com/oklog/run v1.1.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect - github.com/opencontainers/runc v1.1.6 // indirect + github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect + github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/oklog/run v1.0.0 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/rogpeppe/go-internal v1.8.1 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect - golang.org/x/tools v0.6.0 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - gotest.tools/v3 v3.4.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.1.1 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect + gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c // indirect ) diff --git a/sdk/go.sum b/sdk/go.sum index a6b0f59b096ce..25e05622e72e8 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -1,16 +1,14 @@ -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= -github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= 
+github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -18,100 +16,100 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= -github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg= -github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go 
v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= -github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane 
v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0 h1:bAmFiUJ+o0o2B4OiTFeE3MqCOtyo+jjPP9iZ0VRxYUc= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= -github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1 h1:gvPdv/Hr++TRFCl0UbPFHC54P9N9jgsRPnmnr419Uck= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 
-github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= -github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= +github.com/go-ldap/ldap/v3 v3.1.10 h1:7WsKqasmPThNvdl0Q5GPpbTDD/ZD98CfuawrMIuh7qQ= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= -github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/tink/go v1.7.0 
h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= -github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod 
h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 h1:9Q2lu1YbbmiAgvYZ7Pr31RdlVonUpX+mmDL7Z7qTA2U= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= -github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= -github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1 h1:6KMBnfEv0/kLAz0O76sliN5mXbCDcLfs2kP7ssP7+DQ= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= 
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= @@ -122,228 +120,206 @@ github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+Er github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
+github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= -github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.9.1 h1:LtY/I16+5jVGU8rufyyAkwopgq/HpUnxFBg+QLOAV38= -github.com/hashicorp/vault/api v1.9.1/go.mod h1:78kktNcQYbBGSrOjQfHjXN32OhhxXnbYl3zxpd2uPUs= -github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= -github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 
h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= -github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v1.0.0 
h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/opencontainers/runc v1.1.6 h1:XbhB8IfG/EsnhNvZtNdLB0GBw92GYEFvKlhaJk9jUgA= -github.com/opencontainers/runc v1.1.6/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pierrec/lz4 v2.5.2+incompatible 
h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify 
v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= golang.org/x/crypto v0.6.0/go.mod 
h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 
h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= -golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools 
v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/sdk/helper/authmetadata/auth_metadata.go b/sdk/helper/authmetadata/auth_metadata.go index e490ab359abaa..0fd2bd50f8309 100644 --- a/sdk/helper/authmetadata/auth_metadata.go +++ b/sdk/helper/authmetadata/auth_metadata.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package authmetadata /* diff --git a/sdk/helper/authmetadata/auth_metadata_acc_test.go b/sdk/helper/authmetadata/auth_metadata_acc_test.go index 189c960098d31..39888c69a16cb 100644 --- a/sdk/helper/authmetadata/auth_metadata_acc_test.go +++ b/sdk/helper/authmetadata/auth_metadata_acc_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package authmetadata import ( diff --git a/sdk/helper/authmetadata/auth_metadata_test.go b/sdk/helper/authmetadata/auth_metadata_test.go index a82044f9bc436..62341ebc85fbb 100644 --- a/sdk/helper/authmetadata/auth_metadata_test.go +++ b/sdk/helper/authmetadata/auth_metadata_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package authmetadata import ( diff --git a/sdk/helper/base62/base62.go b/sdk/helper/base62/base62.go index 7d2c7d5ba1589..981face425d45 100644 --- a/sdk/helper/base62/base62.go +++ b/sdk/helper/base62/base62.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package base62 diff --git a/sdk/helper/certutil/certutil_test.go b/sdk/helper/certutil/certutil_test.go index 8b550946121d4..60b4888495f0f 100644 --- a/sdk/helper/certutil/certutil_test.go +++ b/sdk/helper/certutil/certutil_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package certutil import ( @@ -13,7 +10,6 @@ import ( "crypto/rsa" "crypto/x509" "crypto/x509/pkix" - "encoding/asn1" "encoding/json" "encoding/pem" "fmt" @@ -30,7 +26,7 @@ import ( // Tests converting back and forth between a CertBundle and a ParsedCertBundle. // -// Also tests the GetSubjKeyID, GetHexFormatted, ParseHexFormatted and +// Also tests the GetSubjKeyID, GetHexFormatted, and // ParsedCertBundle.getSigner functions. func TestCertBundleConversion(t *testing.T) { cbuts := []*CertBundle{ @@ -249,10 +245,6 @@ func compareCertBundleToParsedCertBundle(cbut *CertBundle, pcbut *ParsedCertBund return fmt.Errorf("bundle serial number does not match") } - if !bytes.Equal(pcbut.Certificate.SerialNumber.Bytes(), ParseHexFormatted(cb.SerialNumber, ":")) { - return fmt.Errorf("failed re-parsing hex formatted number %s", cb.SerialNumber) - } - switch { case len(pcbut.CAChain) > 0 && len(cb.CAChain) == 0: return fmt.Errorf("parsed bundle ca chain has certs when cert bundle does not") @@ -946,66 +938,6 @@ func TestSignatureAlgorithmRoundTripping(t *testing.T) { } } -// TestParseBasicConstraintExtension Verify extension generation and parsing of x509 basic constraint extensions -// works as expected. 
-func TestBasicConstraintExtension(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - isCA bool - maxPathLen int - }{ - {"empty-seq", false, -1}, - {"just-ca-true", true, -1}, - {"just-ca-with-maxpathlen", true, 2}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ext, err := CreateBasicConstraintExtension(tt.isCA, tt.maxPathLen) - if err != nil { - t.Fatalf("failed generating basic extension: %v", err) - } - - gotIsCa, gotMaxPathLen, err := ParseBasicConstraintExtension(ext) - if err != nil { - t.Fatalf("failed parsing basic extension: %v", err) - } - - if tt.isCA != gotIsCa { - t.Fatalf("expected isCa (%v) got isCa (%v)", tt.isCA, gotIsCa) - } - - if tt.maxPathLen != gotMaxPathLen { - t.Fatalf("expected maxPathLen (%v) got maxPathLen (%v)", tt.maxPathLen, gotMaxPathLen) - } - }) - } - - t.Run("bad-extension-oid", func(t *testing.T) { - // Test invalid type errors out - _, _, err := ParseBasicConstraintExtension(pkix.Extension{}) - if err == nil { - t.Fatalf("should have failed parsing non-basic constraint extension") - } - }) - - t.Run("garbage-value", func(t *testing.T) { - extraBytes, err := asn1.Marshal("a string") - if err != nil { - t.Fatalf("failed encoding the struct: %v", err) - } - ext := pkix.Extension{ - Id: ExtensionBasicConstraintsOID, - Value: extraBytes, - } - _, _, err = ParseBasicConstraintExtension(ext) - if err == nil { - t.Fatalf("should have failed parsing basic constraint with extra information") - } - }) -} - func genRsaKey(t *testing.T) *rsa.PrivateKey { key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { diff --git a/sdk/helper/certutil/helpers.go b/sdk/helper/certutil/helpers.go index 28472027f8cd0..5fe0dc71d3c4a 100644 --- a/sdk/helper/certutil/helpers.go +++ b/sdk/helper/certutil/helpers.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package certutil import ( @@ -81,11 +78,6 @@ var InvSignatureAlgorithmNames = map[x509.SignatureAlgorithm]string{ x509.PureEd25519: "Ed25519", } -// OID for RFC 5280 CRL Number extension. -// -// > id-ce-cRLNumber OBJECT IDENTIFIER ::= { id-ce 20 } -var CRLNumberOID = asn1.ObjectIdentifier([]int{2, 5, 29, 20}) - // OID for RFC 5280 Delta CRL Indicator CRL extension. // // > id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } @@ -108,13 +100,13 @@ func GetHexFormatted(buf []byte, sep string) string { func ParseHexFormatted(in, sep string) []byte { var ret bytes.Buffer var err error - var inBits uint64 + var inBits int64 inBytes := strings.Split(in, sep) for _, inByte := range inBytes { - if inBits, err = strconv.ParseUint(inByte, 16, 8); err != nil { + if inBits, err = strconv.ParseInt(inByte, 16, 8); err != nil { return nil } - ret.WriteByte(uint8(inBits)) + ret.WriteByte(byte(inBits)) } return ret.Bytes() } @@ -1041,10 +1033,7 @@ func selectSignatureAlgorithmForECDSA(pub crypto.PublicKey, signatureBits int) x } } -var ( - ExtensionBasicConstraintsOID = []int{2, 5, 29, 19} - ExtensionSubjectAltNameOID = []int{2, 5, 29, 17} -) +var oidExtensionBasicConstraints = []int{2, 5, 29, 19} // CreateCSR creates a CSR with the default rand.Reader to // generate a cert/keypair. 
This is currently only meant @@ -1098,7 +1087,7 @@ func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Rea return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()} } ext := pkix.Extension{ - Id: ExtensionBasicConstraintsOID, + Id: oidExtensionBasicConstraints, Value: val, Critical: true, } @@ -1219,7 +1208,7 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun certTemplate.URIs = data.CSR.URIs for _, name := range data.CSR.Extensions { - if !name.Id.Equal(ExtensionBasicConstraintsOID) && !(len(data.Params.OtherSANs) > 0 && name.Id.Equal(ExtensionSubjectAltNameOID)) { + if !name.Id.Equal(oidExtensionBasicConstraints) { certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) } } @@ -1392,68 +1381,3 @@ func CreateDeltaCRLIndicatorExt(completeCRLNumber int64) (pkix.Extension, error) Value: bigNumValue, }, nil } - -// ParseBasicConstraintExtension parses a basic constraint pkix.Extension, useful if attempting to validate -// CSRs are requesting CA privileges as Go does not expose its implementation. Values returned are -// IsCA, MaxPathLen or error. If MaxPathLen was not set, a value of -1 will be returned. -func ParseBasicConstraintExtension(ext pkix.Extension) (bool, int, error) { - if !ext.Id.Equal(ExtensionBasicConstraintsOID) { - return false, -1, fmt.Errorf("passed in extension was not a basic constraint extension") - } - - // All elements are set to optional here, as it is possible that we receive a CSR with the extension - // containing an empty sequence by spec. 
- type basicConstraints struct { - IsCA bool `asn1:"optional"` - MaxPathLen int `asn1:"optional,default:-1"` - } - bc := &basicConstraints{} - leftOver, err := asn1.Unmarshal(ext.Value, bc) - if err != nil { - return false, -1, fmt.Errorf("failed unmarshalling extension value: %w", err) - } - - numLeftOver := len(bytes.TrimSpace(leftOver)) - if numLeftOver > 0 { - return false, -1, fmt.Errorf("%d extra bytes within basic constraints value extension", numLeftOver) - } - - return bc.IsCA, bc.MaxPathLen, nil -} - -// CreateBasicConstraintExtension create a basic constraint extension based on inputs, -// if isCa is false, an empty value sequence will be returned with maxPath being -// ignored. If isCa is true maxPath can be set to -1 to not set a maxPath value. -func CreateBasicConstraintExtension(isCa bool, maxPath int) (pkix.Extension, error) { - var asn1Bytes []byte - var err error - - switch { - case isCa && maxPath >= 0: - CaAndMaxPathLen := struct { - IsCa bool `asn1:""` - MaxPathLen int `asn1:""` - }{ - IsCa: isCa, - MaxPathLen: maxPath, - } - asn1Bytes, err = asn1.Marshal(CaAndMaxPathLen) - case isCa && maxPath < 0: - justCa := struct { - IsCa bool `asn1:""` - }{IsCa: isCa} - asn1Bytes, err = asn1.Marshal(justCa) - default: - asn1Bytes, err = asn1.Marshal(struct{}{}) - } - - if err != nil { - return pkix.Extension{}, err - } - - return pkix.Extension{ - Id: ExtensionBasicConstraintsOID, - Critical: true, - Value: asn1Bytes, - }, nil -} diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go index 039ff8a52291a..15b816f0c8ea0 100644 --- a/sdk/helper/certutil/types.go +++ b/sdk/helper/certutil/types.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Package certutil contains helper functions that are mostly used // with the PKI backend but can be generally useful. 
Functionality // includes helpers for converting a certificate/private key bundle @@ -1016,6 +1013,3 @@ func CreatePolicyInformationExtensionFromStorageStrings(policyIdentifiers []stri Value: asn1Bytes, }, nil } - -// Subject Attribute OIDs -var SubjectPilotUserIDAttributeOID = asn1.ObjectIdentifier{0, 9, 2342, 19200300, 100, 1, 1} diff --git a/sdk/helper/cidrutil/cidr.go b/sdk/helper/cidrutil/cidr.go index 9d2a41829c4c3..7e48c2be5034b 100644 --- a/sdk/helper/cidrutil/cidr.go +++ b/sdk/helper/cidrutil/cidr.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cidrutil import ( diff --git a/sdk/helper/cidrutil/cidr_test.go b/sdk/helper/cidrutil/cidr_test.go index e6fc5764452f2..6a8662cdf17ff 100644 --- a/sdk/helper/cidrutil/cidr_test.go +++ b/sdk/helper/cidrutil/cidr_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cidrutil import ( diff --git a/sdk/helper/compressutil/compress.go b/sdk/helper/compressutil/compress.go index 9e96d8dd32ec8..924f82a2a1baa 100644 --- a/sdk/helper/compressutil/compress.go +++ b/sdk/helper/compressutil/compress.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package compressutil import ( diff --git a/sdk/helper/compressutil/compress_test.go b/sdk/helper/compressutil/compress_test.go index 7d90ce87e982f..f85f3c935ba1b 100644 --- a/sdk/helper/compressutil/compress_test.go +++ b/sdk/helper/compressutil/compress_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package compressutil import ( diff --git a/sdk/helper/consts/agent.go b/sdk/helper/consts/agent.go index 53b8b8e2e76e6..55be844e14ed2 100644 --- a/sdk/helper/consts/agent.go +++ b/sdk/helper/consts/agent.go @@ -1,13 +1,10 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package consts // AgentPathCacheClear is the path that the agent will use as its cache-clear // endpoint. const AgentPathCacheClear = "/agent/v1/cache-clear" -// AgentPathMetrics is the path the agent will use to expose its internal +// AgentPathMetrics is the path the the agent will use to expose its internal // metrics. const AgentPathMetrics = "/agent/v1/metrics" diff --git a/sdk/helper/consts/consts.go b/sdk/helper/consts/consts.go index 744d2aa81c720..c431e2e594194 100644 --- a/sdk/helper/consts/consts.go +++ b/sdk/helper/consts/consts.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consts const ( @@ -37,10 +34,4 @@ const ( ReplicationResolverALPN = "replication_resolver_v1" VaultEnableFilePermissionsCheckEnv = "VAULT_ENABLE_FILE_PERMISSIONS_CHECK" - - VaultDisableUserLockout = "VAULT_DISABLE_USER_LOCKOUT" - - PerformanceReplicationPathTarget = "performance" - - DRReplicationPathParget = "dr" ) diff --git a/sdk/helper/consts/deprecation_status.go b/sdk/helper/consts/deprecation_status.go index e72292bee6c3e..656d6cc992a75 100644 --- a/sdk/helper/consts/deprecation_status.go +++ b/sdk/helper/consts/deprecation_status.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consts // EnvVaultAllowPendingRemovalMounts allows Pending Removal builtins to be diff --git a/sdk/helper/consts/error.go b/sdk/helper/consts/error.go index 5bd3f5e6e2612..1a9175c6392df 100644 --- a/sdk/helper/consts/error.go +++ b/sdk/helper/consts/error.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consts import "errors" diff --git a/sdk/helper/consts/plugin_types.go b/sdk/helper/consts/plugin_types.go index 6bc14b54f7167..e0a00e4860c66 100644 --- a/sdk/helper/consts/plugin_types.go +++ b/sdk/helper/consts/plugin_types.go @@ -1,12 +1,5 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package consts -// NOTE: this file has been copied to -// https://github.com/hashicorp/vault/blob/main/api/plugin_types.go -// Any changes made should be made to both files at the same time. - import "fmt" var PluginTypes = []PluginType{ diff --git a/sdk/helper/consts/proxy.go b/sdk/helper/consts/proxy.go deleted file mode 100644 index 0fc4117ccc1d7..0000000000000 --- a/sdk/helper/consts/proxy.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package consts - -// ProxyPathCacheClear is the path that the proxy will use as its cache-clear -// endpoint. -const ProxyPathCacheClear = "/proxy/v1/cache-clear" - -// ProxyPathMetrics is the path the proxy will use to expose its internal -// metrics. -const ProxyPathMetrics = "/proxy/v1/metrics" - -// ProxyPathQuit is the path that the proxy will use to trigger stopping it. -const ProxyPathQuit = "/proxy/v1/quit" diff --git a/sdk/helper/consts/replication.go b/sdk/helper/consts/replication.go index 2a1511a9a9338..f72c2f47aee2e 100644 --- a/sdk/helper/consts/replication.go +++ b/sdk/helper/consts/replication.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consts const ( diff --git a/sdk/helper/consts/token_consts.go b/sdk/helper/consts/token_consts.go index 108e7ba42d78a..2b4e0278bf287 100644 --- a/sdk/helper/consts/token_consts.go +++ b/sdk/helper/consts/token_consts.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consts const ( diff --git a/sdk/helper/cryptoutil/cryptoutil.go b/sdk/helper/cryptoutil/cryptoutil.go index 956dad3408783..a37086c645d80 100644 --- a/sdk/helper/cryptoutil/cryptoutil.go +++ b/sdk/helper/cryptoutil/cryptoutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package cryptoutil import "golang.org/x/crypto/blake2b" diff --git a/sdk/helper/cryptoutil/cryptoutil_test.go b/sdk/helper/cryptoutil/cryptoutil_test.go index 35799e42a2eae..a277e4fcee400 100644 --- a/sdk/helper/cryptoutil/cryptoutil_test.go +++ b/sdk/helper/cryptoutil/cryptoutil_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package cryptoutil import "testing" diff --git a/sdk/helper/custommetadata/custom_metadata.go b/sdk/helper/custommetadata/custom_metadata.go index 81d4c27035d22..7d4ff8763d116 100644 --- a/sdk/helper/custommetadata/custom_metadata.go +++ b/sdk/helper/custommetadata/custom_metadata.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package custommetadata import ( diff --git a/sdk/helper/custommetadata/custom_metadata_test.go b/sdk/helper/custommetadata/custom_metadata_test.go index 2b25d991203c0..e71bd59462fed 100644 --- a/sdk/helper/custommetadata/custom_metadata_test.go +++ b/sdk/helper/custommetadata/custom_metadata_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package custommetadata import ( diff --git a/sdk/helper/dbtxn/dbtxn.go b/sdk/helper/dbtxn/dbtxn.go index 12288d5b37c82..133b360e73e82 100644 --- a/sdk/helper/dbtxn/dbtxn.go +++ b/sdk/helper/dbtxn/dbtxn.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package dbtxn import ( diff --git a/sdk/helper/docker/testhelpers.go b/sdk/helper/docker/testhelpers.go deleted file mode 100644 index 7902750d6dc52..0000000000000 --- a/sdk/helper/docker/testhelpers.go +++ /dev/null @@ -1,911 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package docker - -import ( - "archive/tar" - "bufio" - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/cenkalti/backoff/v3" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/go-connections/nat" - "github.com/hashicorp/go-uuid" -) - -const DockerAPIVersion = "1.40" - -type Runner struct { - DockerAPI *client.Client - RunOptions RunOptions -} - -type RunOptions struct { - ImageRepo string - ImageTag string - ContainerName string - Cmd []string - Entrypoint []string - Env []string - NetworkName string - NetworkID string - CopyFromTo map[string]string - Ports []string - DoNotAutoRemove bool - AuthUsername string - AuthPassword string - OmitLogTimestamps bool - LogConsumer func(string) - Capabilities []string - PreDelete bool - PostStart func(string, string) error - LogStderr io.Writer - LogStdout io.Writer - VolumeNameToMountPoint map[string]string -} - -func NewDockerAPI() (*client.Client, error) { - return client.NewClientWithOpts(client.FromEnv, client.WithVersion(DockerAPIVersion)) -} - -func NewServiceRunner(opts RunOptions) (*Runner, error) { - dapi, err := NewDockerAPI() - if err != nil { - return nil, err - } - - if opts.NetworkName == "" { - opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME") - } - if opts.NetworkName != "" { - nets, err := dapi.NetworkList(context.TODO(), types.NetworkListOptions{ - Filters: filters.NewArgs(filters.Arg("name", opts.NetworkName)), - }) - if err != nil { - return nil, err - } - if len(nets) != 1 { - return nil, 
fmt.Errorf("expected exactly one docker network named %q, got %d", opts.NetworkName, len(nets)) - } - opts.NetworkID = nets[0].ID - } - if opts.NetworkID == "" { - opts.NetworkID = os.Getenv("TEST_DOCKER_NETWORK_ID") - } - if opts.ContainerName == "" { - if strings.Contains(opts.ImageRepo, "/") { - return nil, fmt.Errorf("ContainerName is required for non-library images") - } - // If there's no slash in the repo it's almost certainly going to be - // a good container name. - opts.ContainerName = opts.ImageRepo - } - return &Runner{ - DockerAPI: dapi, - RunOptions: opts, - }, nil -} - -type ServiceConfig interface { - Address() string - URL() *url.URL -} - -func NewServiceHostPort(host string, port int) *ServiceHostPort { - return &ServiceHostPort{address: fmt.Sprintf("%s:%d", host, port)} -} - -func NewServiceHostPortParse(s string) (*ServiceHostPort, error) { - pieces := strings.Split(s, ":") - if len(pieces) != 2 { - return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) - } - - port, err := strconv.Atoi(pieces[1]) - if err != nil || port < 1 { - return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) - } - - return &ServiceHostPort{s}, nil -} - -type ServiceHostPort struct { - address string -} - -func (s ServiceHostPort) Address() string { - return s.address -} - -func (s ServiceHostPort) URL() *url.URL { - return &url.URL{Host: s.address} -} - -func NewServiceURLParse(s string) (*ServiceURL, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - return &ServiceURL{u: *u}, nil -} - -func NewServiceURL(u url.URL) *ServiceURL { - return &ServiceURL{u: u} -} - -type ServiceURL struct { - u url.URL -} - -func (s ServiceURL) Address() string { - return s.u.Host -} - -func (s ServiceURL) URL() *url.URL { - return &s.u -} - -// ServiceAdapter verifies connectivity to the service, then returns either the -// connection string (typically a URL) and nil, or empty string and an error. 
-type ServiceAdapter func(ctx context.Context, host string, port int) (ServiceConfig, error) - -// StartService will start the runner's configured docker container with a -// random UUID suffix appended to the name to make it unique and will return -// either a hostname or local address depending on if a Docker network was given. -// -// Most tests can default to using this. -func (d *Runner) StartService(ctx context.Context, connect ServiceAdapter) (*Service, error) { - serv, _, err := d.StartNewService(ctx, true, false, connect) - - return serv, err -} - -type LogConsumerWriter struct { - consumer func(string) -} - -func (l LogConsumerWriter) Write(p []byte) (n int, err error) { - // TODO this assumes that we're never passed partial log lines, which - // seems a safe assumption for now based on how docker looks to implement - // logging, but might change in the future. - scanner := bufio.NewScanner(bytes.NewReader(p)) - scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) - for scanner.Scan() { - l.consumer(scanner.Text()) - } - return len(p), nil -} - -var _ io.Writer = &LogConsumerWriter{} - -// StartNewService will start the runner's configured docker container but with the -// ability to control adding a name suffix or forcing a local address to be returned. -// 'addSuffix' will add a random UUID to the end of the container name. -// 'forceLocalAddr' will force the container address returned to be in the -// form of '127.0.0.1:1234' where 1234 is the mapped container port. 
-func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr bool, connect ServiceAdapter) (*Service, string, error) { - if d.RunOptions.PreDelete { - name := d.RunOptions.ContainerName - matches, err := d.DockerAPI.ContainerList(ctx, types.ContainerListOptions{ - All: true, - // TODO use labels to ensure we don't delete anything we shouldn't - Filters: filters.NewArgs( - filters.Arg("name", name), - ), - }) - if err != nil { - return nil, "", fmt.Errorf("failed to list containers named %q", name) - } - for _, cont := range matches { - err = d.DockerAPI.ContainerRemove(ctx, cont.ID, types.ContainerRemoveOptions{Force: true}) - if err != nil { - return nil, "", fmt.Errorf("failed to pre-delete container named %q", name) - } - } - } - result, err := d.Start(context.Background(), addSuffix, forceLocalAddr) - if err != nil { - return nil, "", err - } - - var wg sync.WaitGroup - consumeLogs := false - var logStdout, logStderr io.Writer - if d.RunOptions.LogStdout != nil && d.RunOptions.LogStderr != nil { - consumeLogs = true - logStdout = d.RunOptions.LogStdout - logStderr = d.RunOptions.LogStderr - } else if d.RunOptions.LogConsumer != nil { - consumeLogs = true - logStdout = &LogConsumerWriter{d.RunOptions.LogConsumer} - logStderr = &LogConsumerWriter{d.RunOptions.LogConsumer} - } - - // The waitgroup wg is used here to support some stuff in NewDockerCluster. - // We can't generate the PKI cert for the https listener until we know the - // container's address, meaning we must first start the container, then - // generate the cert, then copy it into the container, then signal Vault - // to reload its config/certs. However, if we SIGHUP Vault before Vault - // has installed its signal handler, that will kill Vault, since the default - // behaviour for HUP is termination. 
So the PostStart that NewDockerCluster - // passes in (which does all that PKI cert stuff) waits to see output from - // Vault on stdout/stderr before it sends the signal, and we don't want to - // run the PostStart until we've hooked into the docker logs. - if consumeLogs { - wg.Add(1) - go func() { - // We must run inside a goroutine because we're using Follow:true, - // and StdCopy will block until the log stream is closed. - stream, err := d.DockerAPI.ContainerLogs(context.Background(), result.Container.ID, types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Timestamps: !d.RunOptions.OmitLogTimestamps, - Details: true, - Follow: true, - }) - wg.Done() - if err != nil { - d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs: %v", err)) - } else { - _, err := stdcopy.StdCopy(logStdout, logStderr, stream) - if err != nil { - d.RunOptions.LogConsumer(fmt.Sprintf("error demultiplexing docker logs: %v", err)) - } - } - }() - } - wg.Wait() - - if d.RunOptions.PostStart != nil { - if err := d.RunOptions.PostStart(result.Container.ID, result.RealIP); err != nil { - return nil, "", fmt.Errorf("poststart failed: %w", err) - } - } - - cleanup := func() { - for i := 0; i < 10; i++ { - err := d.DockerAPI.ContainerRemove(ctx, result.Container.ID, types.ContainerRemoveOptions{Force: true}) - if err == nil || client.IsErrNotFound(err) { - return - } - time.Sleep(1 * time.Second) - } - } - - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = time.Second * 5 - bo.MaxElapsedTime = 2 * time.Minute - - pieces := strings.Split(result.Addrs[0], ":") - portInt, err := strconv.Atoi(pieces[1]) - if err != nil { - return nil, "", err - } - - var config ServiceConfig - err = backoff.Retry(func() error { - container, err := d.DockerAPI.ContainerInspect(ctx, result.Container.ID) - if err != nil || !container.State.Running { - return backoff.Permanent(fmt.Errorf("failed inspect or container %q not running: %w", result.Container.ID, err)) - } - - c, err := 
connect(ctx, pieces[0], portInt) - if err != nil { - return err - } - if c == nil { - return fmt.Errorf("service adapter returned nil error and config") - } - config = c - return nil - }, bo) - - if err != nil { - if !d.RunOptions.DoNotAutoRemove { - cleanup() - } - return nil, "", err - } - - return &Service{ - Config: config, - Cleanup: cleanup, - Container: result.Container, - StartResult: result, - }, result.Container.ID, nil -} - -type Service struct { - Config ServiceConfig - Cleanup func() - Container *types.ContainerJSON - StartResult *StartResult -} - -type StartResult struct { - Container *types.ContainerJSON - Addrs []string - RealIP string -} - -func (d *Runner) Start(ctx context.Context, addSuffix, forceLocalAddr bool) (*StartResult, error) { - name := d.RunOptions.ContainerName - if addSuffix { - suffix, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - name += "-" + suffix - } - - cfg := &container.Config{ - Hostname: name, - Image: fmt.Sprintf("%s:%s", d.RunOptions.ImageRepo, d.RunOptions.ImageTag), - Env: d.RunOptions.Env, - Cmd: d.RunOptions.Cmd, - } - if len(d.RunOptions.Ports) > 0 { - cfg.ExposedPorts = make(map[nat.Port]struct{}) - for _, p := range d.RunOptions.Ports { - cfg.ExposedPorts[nat.Port(p)] = struct{}{} - } - } - if len(d.RunOptions.Entrypoint) > 0 { - cfg.Entrypoint = strslice.StrSlice(d.RunOptions.Entrypoint) - } - - hostConfig := &container.HostConfig{ - AutoRemove: !d.RunOptions.DoNotAutoRemove, - PublishAllPorts: true, - } - if len(d.RunOptions.Capabilities) > 0 { - hostConfig.CapAdd = d.RunOptions.Capabilities - } - - netConfig := &network.NetworkingConfig{} - if d.RunOptions.NetworkID != "" { - netConfig.EndpointsConfig = map[string]*network.EndpointSettings{ - d.RunOptions.NetworkID: {}, - } - } - - // best-effort pull - var opts types.ImageCreateOptions - if d.RunOptions.AuthUsername != "" && d.RunOptions.AuthPassword != "" { - var buf bytes.Buffer - auth := map[string]string{ - "username": 
d.RunOptions.AuthUsername, - "password": d.RunOptions.AuthPassword, - } - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - opts.RegistryAuth = base64.URLEncoding.EncodeToString(buf.Bytes()) - } - resp, _ := d.DockerAPI.ImageCreate(ctx, cfg.Image, opts) - if resp != nil { - _, _ = ioutil.ReadAll(resp) - } - - for vol, mtpt := range d.RunOptions.VolumeNameToMountPoint { - hostConfig.Mounts = append(hostConfig.Mounts, mount.Mount{ - Type: "volume", - Source: vol, - Target: mtpt, - ReadOnly: false, - }) - } - - c, err := d.DockerAPI.ContainerCreate(ctx, cfg, hostConfig, netConfig, nil, cfg.Hostname) - if err != nil { - return nil, fmt.Errorf("container create failed: %v", err) - } - - for from, to := range d.RunOptions.CopyFromTo { - if err := copyToContainer(ctx, d.DockerAPI, c.ID, from, to); err != nil { - _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) - return nil, err - } - } - - err = d.DockerAPI.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}) - if err != nil { - _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) - return nil, fmt.Errorf("container start failed: %v", err) - } - - inspect, err := d.DockerAPI.ContainerInspect(ctx, c.ID) - if err != nil { - _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) - return nil, err - } - - var addrs []string - for _, port := range d.RunOptions.Ports { - pieces := strings.Split(port, "/") - if len(pieces) < 2 { - return nil, fmt.Errorf("expected port of the form 1234/tcp, got: %s", port) - } - if d.RunOptions.NetworkID != "" && !forceLocalAddr { - addrs = append(addrs, fmt.Sprintf("%s:%s", cfg.Hostname, pieces[0])) - } else { - mapped, ok := inspect.NetworkSettings.Ports[nat.Port(port)] - if !ok || len(mapped) == 0 { - return nil, fmt.Errorf("no port mapping found for %s", port) - } - addrs = append(addrs, fmt.Sprintf("127.0.0.1:%s", mapped[0].HostPort)) - } - } - - var realIP string - if 
d.RunOptions.NetworkID == "" { - if len(inspect.NetworkSettings.Networks) > 1 { - return nil, fmt.Errorf("Set d.RunOptions.NetworkName instead for container with multiple networks: %v", inspect.NetworkSettings.Networks) - } - for _, network := range inspect.NetworkSettings.Networks { - realIP = network.IPAddress - break - } - } else { - realIP = inspect.NetworkSettings.Networks[d.RunOptions.NetworkName].IPAddress - } - - return &StartResult{ - Container: &inspect, - Addrs: addrs, - RealIP: realIP, - }, nil -} - -func (d *Runner) RefreshFiles(ctx context.Context, containerID string) error { - for from, to := range d.RunOptions.CopyFromTo { - if err := copyToContainer(ctx, d.DockerAPI, containerID, from, to); err != nil { - // TODO too drastic? - _ = d.DockerAPI.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{}) - return err - } - } - return d.DockerAPI.ContainerKill(ctx, containerID, "SIGHUP") -} - -func (d *Runner) Stop(ctx context.Context, containerID string) error { - if d.RunOptions.NetworkID != "" { - if err := d.DockerAPI.NetworkDisconnect(ctx, d.RunOptions.NetworkID, containerID, true); err != nil { - return fmt.Errorf("error disconnecting network (%v): %v", d.RunOptions.NetworkID, err) - } - } - - // timeout in seconds - timeout := 5 - options := container.StopOptions{ - Timeout: &timeout, - } - if err := d.DockerAPI.ContainerStop(ctx, containerID, options); err != nil { - return fmt.Errorf("error stopping container: %v", err) - } - - return nil -} - -func (d *Runner) Restart(ctx context.Context, containerID string) error { - if err := d.DockerAPI.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil { - return err - } - - ends := &network.EndpointSettings{ - NetworkID: d.RunOptions.NetworkID, - } - - return d.DockerAPI.NetworkConnect(ctx, d.RunOptions.NetworkID, containerID, ends) -} - -func copyToContainer(ctx context.Context, dapi *client.Client, containerID, from, to string) error { - srcInfo, err := 
archive.CopyInfoSourcePath(from, false) - if err != nil { - return fmt.Errorf("error copying from source %q: %v", from, err) - } - - srcArchive, err := archive.TarResource(srcInfo) - if err != nil { - return fmt.Errorf("error creating tar from source %q: %v", from, err) - } - defer srcArchive.Close() - - dstInfo := archive.CopyInfo{Path: to} - - dstDir, content, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) - if err != nil { - return fmt.Errorf("error preparing copy from %q -> %q: %v", from, to, err) - } - defer content.Close() - err = dapi.CopyToContainer(ctx, containerID, dstDir, content, types.CopyToContainerOptions{}) - if err != nil { - return fmt.Errorf("error copying from %q -> %q: %v", from, to, err) - } - - return nil -} - -type RunCmdOpt interface { - Apply(cfg *types.ExecConfig) error -} - -type RunCmdUser string - -var _ RunCmdOpt = (*RunCmdUser)(nil) - -func (u RunCmdUser) Apply(cfg *types.ExecConfig) error { - cfg.User = string(u) - return nil -} - -func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) { - return RunCmdWithOutput(d.DockerAPI, ctx, container, cmd, opts...) 
-} - -func RunCmdWithOutput(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) { - runCfg := types.ExecConfig{ - AttachStdout: true, - AttachStderr: true, - Cmd: cmd, - } - - for index, opt := range opts { - if err := opt.Apply(&runCfg); err != nil { - return nil, nil, -1, fmt.Errorf("error applying option (%d / %v): %w", index, opt, err) - } - } - - ret, err := api.ContainerExecCreate(ctx, container, runCfg) - if err != nil { - return nil, nil, -1, fmt.Errorf("error creating execution environment: %v\ncfg: %v\n", err, runCfg) - } - - resp, err := api.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{}) - if err != nil { - return nil, nil, -1, fmt.Errorf("error attaching to command execution: %v\ncfg: %v\nret: %v\n", err, runCfg, ret) - } - defer resp.Close() - - var stdoutB bytes.Buffer - var stderrB bytes.Buffer - if _, err := stdcopy.StdCopy(&stdoutB, &stderrB, resp.Reader); err != nil { - return nil, nil, -1, fmt.Errorf("error reading command output: %v", err) - } - - stdout := stdoutB.Bytes() - stderr := stderrB.Bytes() - - // Fetch return code. - info, err := api.ContainerExecInspect(ctx, ret.ID) - if err != nil { - return stdout, stderr, -1, fmt.Errorf("error reading command exit code: %v", err) - } - - return stdout, stderr, info.ExitCode, nil -} - -func (d *Runner) RunCmdInBackground(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) { - return RunCmdInBackground(d.DockerAPI, ctx, container, cmd, opts...) 
-} - -func RunCmdInBackground(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) { - runCfg := types.ExecConfig{ - AttachStdout: true, - AttachStderr: true, - Cmd: cmd, - } - - for index, opt := range opts { - if err := opt.Apply(&runCfg); err != nil { - return "", fmt.Errorf("error applying option (%d / %v): %w", index, opt, err) - } - } - - ret, err := api.ContainerExecCreate(ctx, container, runCfg) - if err != nil { - return "", fmt.Errorf("error creating execution environment: %w\ncfg: %v\n", err, runCfg) - } - - err = api.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{}) - if err != nil { - return "", fmt.Errorf("error starting command execution: %w\ncfg: %v\nret: %v\n", err, runCfg, ret) - } - - return ret.ID, nil -} - -// Mapping of path->contents -type PathContents interface { - UpdateHeader(header *tar.Header) error - Get() ([]byte, error) - SetMode(mode int64) - SetOwners(uid int, gid int) -} - -type FileContents struct { - Data []byte - Mode int64 - UID int - GID int -} - -func (b FileContents) UpdateHeader(header *tar.Header) error { - header.Mode = b.Mode - header.Uid = b.UID - header.Gid = b.GID - return nil -} - -func (b FileContents) Get() ([]byte, error) { - return b.Data, nil -} - -func (b *FileContents) SetMode(mode int64) { - b.Mode = mode -} - -func (b *FileContents) SetOwners(uid int, gid int) { - b.UID = uid - b.GID = gid -} - -func PathContentsFromBytes(data []byte) PathContents { - return &FileContents{ - Data: data, - Mode: 0o644, - } -} - -func PathContentsFromString(data string) PathContents { - return PathContentsFromBytes([]byte(data)) -} - -type BuildContext map[string]PathContents - -func NewBuildContext() BuildContext { - return BuildContext{} -} - -func BuildContextFromTarball(reader io.Reader) (BuildContext, error) { - archive := tar.NewReader(reader) - bCtx := NewBuildContext() - - for true { - header, err := archive.Next() - if err != nil { - if err == io.EOF { - 
break - } - - return nil, fmt.Errorf("failed to parse provided tarball: %v", err) - } - - data := make([]byte, int(header.Size)) - read, err := archive.Read(data) - if err != nil { - return nil, fmt.Errorf("failed to parse read from provided tarball: %v", err) - } - - if read != int(header.Size) { - return nil, fmt.Errorf("unexpectedly short read on tarball: %v of %v", read, header.Size) - } - - bCtx[header.Name] = &FileContents{ - Data: data, - Mode: header.Mode, - UID: header.Uid, - GID: header.Gid, - } - } - - return bCtx, nil -} - -func (bCtx *BuildContext) ToTarball() (io.Reader, error) { - var err error - buffer := new(bytes.Buffer) - tarBuilder := tar.NewWriter(buffer) - defer tarBuilder.Close() - - now := time.Now() - for filepath, contents := range *bCtx { - fileHeader := &tar.Header{ - Name: filepath, - ModTime: now, - AccessTime: now, - ChangeTime: now, - } - if contents == nil && !strings.HasSuffix(filepath, "/") { - return nil, fmt.Errorf("expected file path (%v) to have trailing / due to nil contents, indicating directory", filepath) - } - - if err := contents.UpdateHeader(fileHeader); err != nil { - return nil, fmt.Errorf("failed to update tar header entry for %v: %w", filepath, err) - } - - var rawContents []byte - if contents != nil { - rawContents, err = contents.Get() - if err != nil { - return nil, fmt.Errorf("failed to get file contents for %v: %w", filepath, err) - } - - fileHeader.Size = int64(len(rawContents)) - } - - if err := tarBuilder.WriteHeader(fileHeader); err != nil { - return nil, fmt.Errorf("failed to write tar header entry for %v: %w", filepath, err) - } - - if contents != nil { - if _, err := tarBuilder.Write(rawContents); err != nil { - return nil, fmt.Errorf("failed to write tar file entry for %v: %w", filepath, err) - } - } - } - - return bytes.NewReader(buffer.Bytes()), nil -} - -type BuildOpt interface { - Apply(cfg *types.ImageBuildOptions) error -} - -type BuildRemove bool - -var _ BuildOpt = (*BuildRemove)(nil) - -func (u 
BuildRemove) Apply(cfg *types.ImageBuildOptions) error { - cfg.Remove = bool(u) - return nil -} - -type BuildForceRemove bool - -var _ BuildOpt = (*BuildForceRemove)(nil) - -func (u BuildForceRemove) Apply(cfg *types.ImageBuildOptions) error { - cfg.ForceRemove = bool(u) - return nil -} - -type BuildPullParent bool - -var _ BuildOpt = (*BuildPullParent)(nil) - -func (u BuildPullParent) Apply(cfg *types.ImageBuildOptions) error { - cfg.PullParent = bool(u) - return nil -} - -type BuildArgs map[string]*string - -var _ BuildOpt = (*BuildArgs)(nil) - -func (u BuildArgs) Apply(cfg *types.ImageBuildOptions) error { - cfg.BuildArgs = u - return nil -} - -type BuildTags []string - -var _ BuildOpt = (*BuildTags)(nil) - -func (u BuildTags) Apply(cfg *types.ImageBuildOptions) error { - cfg.Tags = u - return nil -} - -const containerfilePath = "_containerfile" - -func (d *Runner) BuildImage(ctx context.Context, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) { - return BuildImage(ctx, d.DockerAPI, containerfile, containerContext, opts...) -} - -func BuildImage(ctx context.Context, api *client.Client, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) { - var cfg types.ImageBuildOptions - - // Build container context tarball, provisioning containerfile in. 
- containerContext[containerfilePath] = PathContentsFromBytes([]byte(containerfile)) - tar, err := containerContext.ToTarball() - if err != nil { - return nil, fmt.Errorf("failed to create build image context tarball: %w", err) - } - cfg.Dockerfile = "/" + containerfilePath - - // Apply all given options - for index, opt := range opts { - if err := opt.Apply(&cfg); err != nil { - return nil, fmt.Errorf("failed to apply option (%d / %v): %w", index, opt, err) - } - } - - resp, err := api.ImageBuild(ctx, tar, cfg) - if err != nil { - return nil, fmt.Errorf("failed to build image: %v", err) - } - - output, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read image build output: %w", err) - } - - return output, nil -} - -func (d *Runner) CopyTo(container string, destination string, contents BuildContext) error { - // XXX: currently we use the default options but we might want to allow - // modifying cfg.CopyUIDGID in the future. - var cfg types.CopyToContainerOptions - - // Convert our provided contents to a tarball to ship up. 
- tar, err := contents.ToTarball() - if err != nil { - return fmt.Errorf("failed to build contents into tarball: %v", err) - } - - return d.DockerAPI.CopyToContainer(context.Background(), container, destination, tar, cfg) -} - -func (d *Runner) CopyFrom(container string, source string) (BuildContext, *types.ContainerPathStat, error) { - reader, stat, err := d.DockerAPI.CopyFromContainer(context.Background(), container, source) - if err != nil { - return nil, nil, fmt.Errorf("failed to read %v from container: %v", source, err) - } - - result, err := BuildContextFromTarball(reader) - if err != nil { - return nil, nil, fmt.Errorf("failed to build archive from result: %v", err) - } - - return result, &stat, nil -} - -func (d *Runner) GetNetworkAndAddresses(container string) (map[string]string, error) { - response, err := d.DockerAPI.ContainerInspect(context.Background(), container) - if err != nil { - return nil, fmt.Errorf("failed to fetch container inspection data: %v", err) - } - - if response.NetworkSettings == nil || len(response.NetworkSettings.Networks) == 0 { - return nil, fmt.Errorf("container (%v) had no associated network settings: %v", container, response) - } - - ret := make(map[string]string) - ns := response.NetworkSettings.Networks - for network, data := range ns { - if data == nil { - continue - } - - ret[network] = data.IPAddress - } - - if len(ret) == 0 { - return nil, fmt.Errorf("no valid network data for container (%v): %v", container, response) - } - - return ret, nil -} diff --git a/sdk/helper/errutil/error.go b/sdk/helper/errutil/error.go index 1866343b51835..0b95efb40e3a9 100644 --- a/sdk/helper/errutil/error.go +++ b/sdk/helper/errutil/error.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package errutil // UserError represents an error generated due to invalid user input diff --git a/sdk/helper/hclutil/hcl.go b/sdk/helper/hclutil/hcl.go index a78d820087d43..0b120367d5a6a 100644 --- a/sdk/helper/hclutil/hcl.go +++ b/sdk/helper/hclutil/hcl.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package hclutil import ( diff --git a/sdk/helper/identitytpl/templating.go b/sdk/helper/identitytpl/templating.go index 124a27c920c30..6d84df8241ded 100644 --- a/sdk/helper/identitytpl/templating.go +++ b/sdk/helper/identitytpl/templating.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package identitytpl import ( diff --git a/sdk/helper/identitytpl/templating_test.go b/sdk/helper/identitytpl/templating_test.go index d17409e78ae66..15bfc812387c6 100644 --- a/sdk/helper/identitytpl/templating_test.go +++ b/sdk/helper/identitytpl/templating_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package identitytpl import ( diff --git a/sdk/helper/jsonutil/json.go b/sdk/helper/jsonutil/json.go index 1abd9fafebdc8..c03a4f8c8d14c 100644 --- a/sdk/helper/jsonutil/json.go +++ b/sdk/helper/jsonutil/json.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package jsonutil import ( diff --git a/sdk/helper/jsonutil/json_test.go b/sdk/helper/jsonutil/json_test.go index 10aabf1b93ea3..dd33f9bf179aa 100644 --- a/sdk/helper/jsonutil/json_test.go +++ b/sdk/helper/jsonutil/json_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package jsonutil import ( diff --git a/sdk/helper/kdf/kdf.go b/sdk/helper/kdf/kdf.go index e9964ba28c4f7..9d3e0e858585e 100644 --- a/sdk/helper/kdf/kdf.go +++ b/sdk/helper/kdf/kdf.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - // This package is used to implement Key Derivation Functions (KDF) // based on the recommendations of NIST SP 800-108. These are useful // for generating unique-per-transaction keys, or situations in which diff --git a/sdk/helper/kdf/kdf_test.go b/sdk/helper/kdf/kdf_test.go index ed5c0a13d36b4..2148257f357c2 100644 --- a/sdk/helper/kdf/kdf_test.go +++ b/sdk/helper/kdf/kdf_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package kdf import ( diff --git a/sdk/helper/keysutil/cache.go b/sdk/helper/keysutil/cache.go index fb55091e40a8c..7da9c202fa58b 100644 --- a/sdk/helper/keysutil/cache.go +++ b/sdk/helper/keysutil/cache.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package keysutil type Cache interface { diff --git a/sdk/helper/keysutil/consts.go b/sdk/helper/keysutil/consts.go index b684232423213..cbb8123517f13 100644 --- a/sdk/helper/keysutil/consts.go +++ b/sdk/helper/keysutil/consts.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package keysutil import ( @@ -16,8 +13,8 @@ import ( type HashType uint32 const ( - HashTypeNone HashType = iota - HashTypeSHA1 + _ = iota + HashTypeSHA1 HashType = iota HashTypeSHA2224 HashTypeSHA2256 HashTypeSHA2384 @@ -38,7 +35,6 @@ const ( var ( HashTypeMap = map[string]HashType{ - "none": HashTypeNone, "sha1": HashTypeSHA1, "sha2-224": HashTypeSHA2224, "sha2-256": HashTypeSHA2256, @@ -51,7 +47,6 @@ var ( } HashFuncMap = map[HashType]func() hash.Hash{ - HashTypeNone: nil, HashTypeSHA1: sha1.New, HashTypeSHA2224: sha256.New224, HashTypeSHA2256: sha256.New, @@ -64,7 +59,6 @@ var ( } CryptoHashMap = map[HashType]crypto.Hash{ - HashTypeNone: 0, HashTypeSHA1: crypto.SHA1, HashTypeSHA2224: crypto.SHA224, HashTypeSHA2256: crypto.SHA256, diff --git a/sdk/helper/keysutil/encrypted_key_storage.go b/sdk/helper/keysutil/encrypted_key_storage.go index 7314758bc1679..90eaaf0bbae1f 100644 --- a/sdk/helper/keysutil/encrypted_key_storage.go +++ b/sdk/helper/keysutil/encrypted_key_storage.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package keysutil import ( diff --git a/sdk/helper/keysutil/encrypted_key_storage_test.go b/sdk/helper/keysutil/encrypted_key_storage_test.go index 5147027fc8839..2f29d14b7ad7f 100644 --- a/sdk/helper/keysutil/encrypted_key_storage_test.go +++ b/sdk/helper/keysutil/encrypted_key_storage_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package keysutil import ( diff --git a/sdk/helper/keysutil/lock_manager.go b/sdk/helper/keysutil/lock_manager.go index 6d2881e0d8daa..a60cf69d53f2c 100644 --- a/sdk/helper/keysutil/lock_manager.go +++ b/sdk/helper/keysutil/lock_manager.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package keysutil import ( @@ -62,12 +59,6 @@ type PolicyRequest struct { // AllowImportedKeyRotation indicates whether an imported key may be rotated by Vault AllowImportedKeyRotation bool - - // Indicates whether a private or public key is imported/upserted - IsPrivateKey bool - - // The UUID of the managed key, if using one - ManagedKeyUUID string } type LockManager struct { @@ -391,12 +382,6 @@ func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest, rand io return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) } - case KeyType_MANAGED_KEY: - if req.Derived || req.Convergent { - cleanup() - return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) - } - default: cleanup() return nil, false, fmt.Errorf("unsupported key type %v", req.KeyType) @@ -427,11 +412,7 @@ func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest, rand io } // Performs the actual persist and does setup - if p.Type == KeyType_MANAGED_KEY { - err = p.RotateManagedKey(ctx, req.Storage, req.ManagedKeyUUID) - } else { - err = p.Rotate(ctx, req.Storage, rand) - } + err = p.Rotate(ctx, req.Storage, rand) if err != nil { cleanup() return nil, false, err @@ -514,7 +495,7 @@ func (lm *LockManager) ImportPolicy(ctx context.Context, req PolicyRequest, key } } - err = p.ImportPublicOrPrivate(ctx, req.Storage, key, req.IsPrivateKey, rand) + err = p.Import(ctx, req.Storage, key, rand) if err != nil { return fmt.Errorf("error importing key: %s", err) } diff --git a/sdk/helper/keysutil/managed_key_util.go b/sdk/helper/keysutil/managed_key_util.go deleted file mode 100644 index bb3c0b2968b73..0000000000000 --- a/sdk/helper/keysutil/managed_key_util.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -//go:build !enterprise - -package keysutil - -import ( - "context" - "errors" - - "github.com/hashicorp/vault/sdk/logical" -) - -type ManagedKeyParameters struct { - ManagedKeySystemView logical.ManagedKeySystemView - BackendUUID string - Context context.Context -} - -var errEntOnly = errors.New("managed keys are supported within enterprise edition only") - -func (p *Policy) decryptWithManagedKey(params ManagedKeyParameters, keyEntry KeyEntry, ciphertext []byte, nonce []byte, aad []byte) (plaintext []byte, err error) { - return nil, errEntOnly -} - -func (p *Policy) encryptWithManagedKey(params ManagedKeyParameters, keyEntry KeyEntry, plaintext []byte, nonce []byte, aad []byte) (ciphertext []byte, err error) { - return nil, errEntOnly -} - -func (p *Policy) signWithManagedKey(options *SigningOptions, keyEntry KeyEntry, input []byte) (sig []byte, err error) { - return nil, errEntOnly -} - -func (p *Policy) verifyWithManagedKey(options *SigningOptions, keyEntry KeyEntry, input, sig []byte) (verified bool, err error) { - return false, errEntOnly -} - -func (p *Policy) HMACWithManagedKey(ctx context.Context, ver int, managedKeySystemView logical.ManagedKeySystemView, backendUUID string, algorithm string, data []byte) (hmacBytes []byte, err error) { - return nil, errEntOnly -} - -func (p *Policy) RotateManagedKey(ctx context.Context, storage logical.Storage, managedKeyUUID string) error { - return errEntOnly -} diff --git a/sdk/helper/keysutil/policy.go b/sdk/helper/keysutil/policy.go index 869733b3e9d4e..bbd587feb3554 100644 --- a/sdk/helper/keysutil/policy.go +++ b/sdk/helper/keysutil/policy.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package keysutil import ( @@ -22,7 +19,6 @@ import ( "encoding/pem" "errors" "fmt" - "hash" "io" "math/big" "path" @@ -42,8 +38,6 @@ import ( "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/kdf" "github.com/hashicorp/vault/sdk/logical" - - "github.com/google/tink/go/kwp/subtle" ) // Careful with iota; don't put anything before it in this const block because @@ -85,14 +79,6 @@ type AEADFactory interface { GetAEAD(iv []byte) (cipher.AEAD, error) } -type AssociatedDataFactory interface { - GetAssociatedData() ([]byte, error) -} - -type ManagedKeyFactory interface { - GetManagedKeyParameters() ManagedKeyParameters -} - type RestoreInfo struct { Time time.Time `json:"time"` Version int `json:"version"` @@ -104,11 +90,10 @@ type BackupInfo struct { } type SigningOptions struct { - HashAlgorithm HashType - Marshaling MarshalingType - SaltLength int - SigAlgorithm string - ManagedKeyParams ManagedKeyParameters + HashAlgorithm HashType + Marshaling MarshalingType + SaltLength int + SigAlgorithm string } type SigningResult struct { @@ -124,7 +109,7 @@ type KeyType int func (kt KeyType) EncryptionSupported() bool { switch kt { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: return true } return false @@ -132,7 +117,7 @@ func (kt KeyType) EncryptionSupported() bool { func (kt KeyType) DecryptionSupported() bool { switch kt { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: return true } return false @@ -140,7 +125,7 @@ func (kt KeyType) 
DecryptionSupported() bool { func (kt KeyType) SigningSupported() bool { switch kt { - case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: return true } return false @@ -162,22 +147,6 @@ func (kt KeyType) DerivationSupported() bool { return false } -func (kt KeyType) AssociatedDataSupported() bool { - switch kt { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_MANAGED_KEY: - return true - } - return false -} - -func (kt KeyType) ImportPublicKeySupported() bool { - switch kt { - case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519: - return true - } - return false -} - func (kt KeyType) String() string { switch kt { case KeyType_AES128_GCM96: @@ -202,8 +171,6 @@ func (kt KeyType) String() string { return "rsa-4096" case KeyType_HMAC: return "hmac" - case KeyType_MANAGED_KEY: - return "managed_key" } return "[unknown]" @@ -229,8 +196,7 @@ type KeyEntry struct { EC_Y *big.Int `json:"ec_y"` EC_D *big.Int `json:"ec_d"` - RSAKey *rsa.PrivateKey `json:"rsa_key"` - RSAPublicKey *rsa.PublicKey `json:"rsa_public_key"` + RSAKey *rsa.PrivateKey `json:"rsa_key"` // The public key in an appropriate format for the type of key FormattedPublicKey string `json:"public_key"` @@ -242,16 +208,6 @@ type KeyEntry struct { // This is deprecated (but still filled) in favor of the value above which // is more precise DeprecatedCreationTime int64 `json:"creation_time"` - - ManagedKeyUUID string `json:"managed_key_id,omitempty"` -} - -func (ke *KeyEntry) IsPrivateKeyMissing() bool { - if ke.RSAKey != nil || ke.EC_D != nil || len(ke.Key) != 0 { - return false - } - - return true } // deprecatedKeyEntryMap is used to allow JSON marshal/unmarshal 
@@ -355,19 +311,6 @@ func LoadPolicy(ctx context.Context, s logical.Storage, path string) (*Policy, e return nil, err } - // Migrate RSA private keys to include their private counterpart. This lets - // us reference RSAPublicKey whenever we need to, without necessarily - // needing the private key handy, synchronizing the behavior with EC and - // Ed25519 key pairs. - switch policy.Type { - case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: - for _, entry := range policy.Keys { - if entry.RSAPublicKey == nil && entry.RSAKey != nil { - entry.RSAPublicKey = entry.RSAKey.Public().(*rsa.PublicKey) - } - } - } - policy.l = new(sync.RWMutex) return &policy, nil @@ -464,6 +407,8 @@ type Policy struct { // AllowImportedKeyRotation indicates whether an imported key may be rotated by Vault AllowImportedKeyRotation bool + + ManagedKeyName string `json:"managed_key_name,omitempty"` } func (p *Policy) Lock(exclusive bool) { @@ -903,10 +848,6 @@ func (p *Policy) Encrypt(ver int, context, nonce []byte, value string) (string, } func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) { - return p.DecryptWithFactory(context, nonce, value, nil) -} - -func (p *Policy) DecryptWithFactory(context, nonce []byte, value string, factories ...interface{}) (string, error) { if !p.Type.DecryptionSupported() { return "", errutil.UserError{Err: fmt.Sprintf("message decryption not supported for key type %v", p.Type)} } @@ -974,29 +915,11 @@ func (p *Policy) DecryptWithFactory(context, nonce []byte, value string, factori return "", errutil.InternalError{Err: "could not derive enc key, length not correct"} } - symopts := SymmetricOpts{ - Convergent: p.ConvergentEncryption, - ConvergentVersion: p.ConvergentVersion, - } - for index, rawFactory := range factories { - if rawFactory == nil { - continue - } - switch factory := rawFactory.(type) { - case AEADFactory: - symopts.AEADFactory = factory - case AssociatedDataFactory: - symopts.AdditionalData, err = 
factory.GetAssociatedData() - if err != nil { - return "", errutil.InternalError{Err: fmt.Sprintf("unable to get associated_data/additional_data from factory[%d]: %v", index, err)} - } - case ManagedKeyFactory: - default: - return "", errutil.InternalError{Err: fmt.Sprintf("unknown type of factory[%d]: %T", index, rawFactory)} - } - } - - plain, err = p.SymmetricDecryptRaw(encKey, decoded, symopts) + plain, err = p.SymmetricDecryptRaw(encKey, decoded, + SymmetricOpts{ + Convergent: p.ConvergentEncryption, + ConvergentVersion: p.ConvergentVersion, + }) if err != nil { return "", err } @@ -1006,40 +929,10 @@ func (p *Policy) DecryptWithFactory(context, nonce []byte, value string, factori return "", err } key := keyEntry.RSAKey - if key == nil { - return "", errutil.InternalError{Err: fmt.Sprintf("cannot decrypt ciphertext, key version does not have a private counterpart")} - } plain, err = rsa.DecryptOAEP(sha256.New(), rand.Reader, key, decoded, nil) if err != nil { return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA decrypt the ciphertext: %v", err)} } - case KeyType_MANAGED_KEY: - keyEntry, err := p.safeGetKeyEntry(ver) - if err != nil { - return "", err - } - var aad []byte - var managedKeyFactory ManagedKeyFactory - for _, f := range factories { - switch factory := f.(type) { - case AssociatedDataFactory: - aad, err = factory.GetAssociatedData() - if err != nil { - return "", err - } - case ManagedKeyFactory: - managedKeyFactory = factory - } - } - - if managedKeyFactory == nil { - return "", errors.New("key type is managed_key, but managed key parameters were not provided") - } - - plain, err = p.decryptWithManagedKey(managedKeyFactory.GetManagedKeyParameters(), keyEntry, decoded, nonce, aad) - if err != nil { - return "", err - } default: return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} @@ -1083,13 +976,13 @@ func (p *Policy) minRSAPSSSaltLength() int { return rsa.PSSSaltLengthEqualsHash } -func (p *Policy) 
maxRSAPSSSaltLength(keyBitLen int, hash crypto.Hash) int { +func (p *Policy) maxRSAPSSSaltLength(priv *rsa.PrivateKey, hash crypto.Hash) int { // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/crypto/rsa/pss.go;l=288 - return (keyBitLen-1+7)/8 - 2 - hash.Size() + return (priv.N.BitLen()-1+7)/8 - 2 - hash.Size() } -func (p *Policy) validRSAPSSSaltLength(keyBitLen int, hash crypto.Hash, saltLength int) bool { - return p.minRSAPSSSaltLength() <= saltLength && saltLength <= p.maxRSAPSSSaltLength(keyBitLen, hash) +func (p *Policy) validRSAPSSSaltLength(priv *rsa.PrivateKey, hash crypto.Hash, saltLength int) bool { + return p.minRSAPSSSaltLength() <= saltLength && saltLength <= p.maxRSAPSSSaltLength(priv, hash) } func (p *Policy) SignWithOptions(ver int, context, input []byte, options *SigningOptions) (*SigningResult, error) { @@ -1116,11 +1009,6 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin return nil, err } - // Before signing, check if key has its private part, if not return error - if keyParams.IsPrivateKeyMissing() { - return nil, errutil.UserError{Err: "requested version for signing does not contain a private part"} - } - hashAlgorithm := options.HashAlgorithm marshaling := options.Marshaling saltLength := options.SaltLength @@ -1227,7 +1115,7 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin switch sigAlgorithm { case "pss": - if !p.validRSAPSSSaltLength(key.N.BitLen(), algo, saltLength) { + if !p.validRSAPSSSaltLength(key, algo, saltLength) { return nil, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} } sig, err = rsa.SignPSS(rand.Reader, key, algo, input, &rsa.PSSOptions{SaltLength: saltLength}) @@ -1243,17 +1131,6 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported rsa signature algorithm %s", sigAlgorithm)} } - case KeyType_MANAGED_KEY: - 
keyEntry, err := p.safeGetKeyEntry(ver) - if err != nil { - return nil, err - } - - sig, err = p.signWithManagedKey(options, keyEntry, input) - if err != nil { - return nil, err - } - default: return nil, fmt.Errorf("unsupported key type %v", p.Type) } @@ -1381,30 +1258,20 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o return ecdsa.Verify(key, input, ecdsaSig.R, ecdsaSig.S), nil case KeyType_ED25519: - var pub ed25519.PublicKey + var key ed25519.PrivateKey if p.Derived { // Derive the key that should be used - key, err := p.GetKey(context, ver, 32) + var err error + key, err = p.GetKey(context, ver, 32) if err != nil { return false, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)} } - pub = ed25519.PrivateKey(key).Public().(ed25519.PublicKey) } else { - keyEntry, err := p.safeGetKeyEntry(ver) - if err != nil { - return false, err - } - - raw, err := base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey) - if err != nil { - return false, err - } - - pub = ed25519.PublicKey(raw) + key = ed25519.PrivateKey(p.Keys[strconv.Itoa(ver)].Key) } - return ed25519.Verify(pub, input, sigBytes), nil + return ed25519.Verify(key.Public().(ed25519.PublicKey), input, sigBytes), nil case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: keyEntry, err := p.safeGetKeyEntry(ver) @@ -1412,6 +1279,8 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o return false, err } + key := keyEntry.RSAKey + algo, ok := CryptoHashMap[hashAlgorithm] if !ok { return false, errutil.InternalError{Err: "unsupported hash algorithm"} @@ -1423,62 +1292,30 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o switch sigAlgorithm { case "pss": - publicKey := keyEntry.RSAPublicKey - if !keyEntry.IsPrivateKeyMissing() { - publicKey = &keyEntry.RSAKey.PublicKey - } - if !p.validRSAPSSSaltLength(publicKey.N.BitLen(), algo, saltLength) { + if !p.validRSAPSSSaltLength(key, algo, saltLength) 
{ return false, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} } - err = rsa.VerifyPSS(publicKey, algo, input, sigBytes, &rsa.PSSOptions{SaltLength: saltLength}) + err = rsa.VerifyPSS(&key.PublicKey, algo, input, sigBytes, &rsa.PSSOptions{SaltLength: saltLength}) case "pkcs1v15": - publicKey := keyEntry.RSAPublicKey - if !keyEntry.IsPrivateKeyMissing() { - publicKey = &keyEntry.RSAKey.PublicKey - } - err = rsa.VerifyPKCS1v15(publicKey, algo, input, sigBytes) + err = rsa.VerifyPKCS1v15(&key.PublicKey, algo, input, sigBytes) default: return false, errutil.InternalError{Err: fmt.Sprintf("unsupported rsa signature algorithm %s", sigAlgorithm)} } return err == nil, nil - case KeyType_MANAGED_KEY: - keyEntry, err := p.safeGetKeyEntry(ver) - if err != nil { - return false, err - } - - return p.verifyWithManagedKey(options, keyEntry, input, sigBytes) - default: return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} } } func (p *Policy) Import(ctx context.Context, storage logical.Storage, key []byte, randReader io.Reader) error { - return p.ImportPublicOrPrivate(ctx, storage, key, true, randReader) -} - -func (p *Policy) ImportPublicOrPrivate(ctx context.Context, storage logical.Storage, key []byte, isPrivateKey bool, randReader io.Reader) error { now := time.Now() entry := KeyEntry{ CreationTime: now, DeprecatedCreationTime: now.Unix(), } - // Before we insert this entry, check if the latest version is incomplete - // and this entry matches the current version; if so, return without - // updating to the next version. 
- if p.LatestVersion > 0 { - latestKey := p.Keys[strconv.Itoa(p.LatestVersion)] - if latestKey.IsPrivateKeyMissing() && isPrivateKey { - if err := p.ImportPrivateKeyForVersion(ctx, storage, p.LatestVersion, key); err == nil { - return nil - } - } - } - if p.Type != KeyType_HMAC { hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) if err != nil { @@ -1487,10 +1324,6 @@ func (p *Policy) ImportPublicOrPrivate(ctx context.Context, storage logical.Stor entry.HMACKey = hmacKey } - if p.Type == KeyType_ED25519 && p.Derived && !isPrivateKey { - return fmt.Errorf("unable to import only public key for derived Ed25519 key: imported key should not be an Ed25519 key pair but is instead an HKDF key") - } - if (p.Type == KeyType_AES128_GCM96 && len(key) != 16) || ((p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305) && len(key) != 32) || (p.Type == KeyType_HMAC && (len(key) < HmacMinKeySize || len(key) > HmacMaxKeySize)) { @@ -1504,42 +1337,83 @@ func (p *Policy) ImportPublicOrPrivate(ctx context.Context, storage logical.Stor entry.HMACKey = key } } else { - var parsedKey any - var err error - if isPrivateKey { - parsedKey, err = x509.ParsePKCS8PrivateKey(key) - if err != nil { - if strings.Contains(err.Error(), "unknown elliptic curve") { - var edErr error - parsedKey, edErr = ParsePKCS8Ed25519PrivateKey(key) - if edErr != nil { - return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) - } - - // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! - } else if strings.Contains(err.Error(), oidSignatureRSAPSS.String()) { - var rsaErr error - parsedKey, rsaErr = ParsePKCS8RSAPSSPrivateKey(key) - if rsaErr != nil { - return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an RSA/PSS private key: %v\n - original error: %w", rsaErr, err) - } - - // Parsing as RSA-PSS in PKCS8 succeeded! 
- } else { - return fmt.Errorf("error parsing asymmetric key: %s", err) + parsedPrivateKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + if strings.Contains(err.Error(), "unknown elliptic curve") { + var edErr error + parsedPrivateKey, edErr = ParsePKCS8Ed25519PrivateKey(key) + if edErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) } + + // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! + } else { + return fmt.Errorf("error parsing asymmetric key: %s", err) } - } else { - pemBlock, _ := pem.Decode(key) - parsedKey, err = x509.ParsePKIXPublicKey(pemBlock.Bytes) + } + + switch parsedPrivateKey.(type) { + case *ecdsa.PrivateKey: + if p.Type != KeyType_ECDSA_P256 && p.Type != KeyType_ECDSA_P384 && p.Type != KeyType_ECDSA_P521 { + return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) + } + + ecdsaKey := parsedPrivateKey.(*ecdsa.PrivateKey) + curve := elliptic.P256() + if p.Type == KeyType_ECDSA_P384 { + curve = elliptic.P384() + } else if p.Type == KeyType_ECDSA_P521 { + curve = elliptic.P521() + } + + if ecdsaKey.Curve != curve { + return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) + } + + entry.EC_D = ecdsaKey.D + entry.EC_X = ecdsaKey.X + entry.EC_Y = ecdsaKey.Y + derBytes, err := x509.MarshalPKIXPublicKey(ecdsaKey.Public()) if err != nil { - return fmt.Errorf("error parsing public key: %s", err) + return errwrap.Wrapf("error marshaling public key: {{err}}", err) } - } + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return fmt.Errorf("error PEM-encoding public key") + } + entry.FormattedPublicKey = string(pemBytes) + case ed25519.PrivateKey: + if p.Type != KeyType_ED25519 { + return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, 
parsedPrivateKey) + } + privateKey := parsedPrivateKey.(ed25519.PrivateKey) - err = entry.parseFromKey(p.Type, parsedKey) - if err != nil { - return err + entry.Key = privateKey + publicKey := privateKey.Public().(ed25519.PublicKey) + entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) + case *rsa.PrivateKey: + if p.Type != KeyType_RSA2048 && p.Type != KeyType_RSA3072 && p.Type != KeyType_RSA4096 { + return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) + } + + keyBytes := 256 + if p.Type == KeyType_RSA3072 { + keyBytes = 384 + } else if p.Type == KeyType_RSA4096 { + keyBytes = 512 + } + rsaKey := parsedPrivateKey.(*rsa.PrivateKey) + if rsaKey.Size() != keyBytes { + return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) + } + + entry.RSAKey = rsaKey + default: + return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) } } @@ -1667,19 +1541,13 @@ func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { entry.FormattedPublicKey = string(pemBytes) case KeyType_ED25519: - // Go uses a 64-byte private key for Ed25519 keys (private+public, each - // 32-bytes long). When we do Key derivation, we still generate a 32-byte - // random value (and compute the corresponding Ed25519 public key), but - // use this entire 64-byte key as if it was an HKDF key. The corresponding - // underlying public key is never returned (which is probably good, because - // doing so would leak half of our HKDF key...), but means we cannot import - // derived-enabled Ed25519 public key components. 
pub, pri, err := ed25519.GenerateKey(randReader) if err != nil { return err } entry.Key = pri entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(pub) + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: bitSize := 2048 if p.Type == KeyType_RSA3072 { @@ -1972,7 +1840,7 @@ func (p *Policy) SymmetricDecryptRaw(encKey, ciphertext []byte, opts SymmetricOp return plain, nil } -func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value string, factories ...interface{}) (string, error) { +func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value string, factory AEADFactory) (string, error) { if !p.Type.EncryptionSupported() { return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)} } @@ -2033,30 +1901,14 @@ func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value } } - symopts := SymmetricOpts{ - Convergent: p.ConvergentEncryption, - HMACKey: hmacKey, - Nonce: nonce, - } - for index, rawFactory := range factories { - if rawFactory == nil { - continue - } - switch factory := rawFactory.(type) { - case AEADFactory: - symopts.AEADFactory = factory - case AssociatedDataFactory: - symopts.AdditionalData, err = factory.GetAssociatedData() - if err != nil { - return "", errutil.InternalError{Err: fmt.Sprintf("unable to get associated_data/additional_data from factory[%d]: %v", index, err)} - } - case ManagedKeyFactory: - default: - return "", errutil.InternalError{Err: fmt.Sprintf("unknown type of factory[%d]: %T", index, rawFactory)} - } - } + ciphertext, err = p.SymmetricEncryptRaw(ver, encKey, plaintext, + SymmetricOpts{ + Convergent: p.ConvergentEncryption, + HMACKey: hmacKey, + Nonce: nonce, + AEADFactory: factory, + }) - ciphertext, err = p.SymmetricEncryptRaw(ver, encKey, plaintext, symopts) if err != nil { return "", err } @@ -2065,44 +1917,11 @@ func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value if err != nil 
{ return "", err } - var publicKey *rsa.PublicKey - if keyEntry.RSAKey != nil { - publicKey = &keyEntry.RSAKey.PublicKey - } else { - publicKey = keyEntry.RSAPublicKey - } - ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, publicKey, plaintext, nil) + key := keyEntry.RSAKey + ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, &key.PublicKey, plaintext, nil) if err != nil { return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA encrypt the plaintext: %v", err)} } - case KeyType_MANAGED_KEY: - keyEntry, err := p.safeGetKeyEntry(ver) - if err != nil { - return "", err - } - - var aad []byte - var managedKeyFactory ManagedKeyFactory - for _, f := range factories { - switch factory := f.(type) { - case AssociatedDataFactory: - aad, err = factory.GetAssociatedData() - if err != nil { - return "", nil - } - case ManagedKeyFactory: - managedKeyFactory = factory - } - } - - if managedKeyFactory == nil { - return "", errors.New("key type is managed_key, but managed key parameters were not provided") - } - - ciphertext, err = p.encryptWithManagedKey(managedKeyFactory.GetManagedKeyParameters(), keyEntry, plaintext, nonce, aad) - if err != nil { - return "", err - } default: return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} @@ -2116,280 +1935,3 @@ func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value return encoded, nil } - -func (p *Policy) KeyVersionCanBeUpdated(keyVersion int, isPrivateKey bool) error { - keyEntry, err := p.safeGetKeyEntry(keyVersion) - if err != nil { - return err - } - - if !p.Type.ImportPublicKeySupported() { - return errors.New("provided type does not support importing key versions") - } - - isPrivateKeyMissing := keyEntry.IsPrivateKeyMissing() - if isPrivateKeyMissing && !isPrivateKey { - return errors.New("cannot add a public key to a key version that already has a public key set") - } - - if !isPrivateKeyMissing { - return errors.New("private key imported, 
key version cannot be updated") - } - - return nil -} - -func (p *Policy) ImportPrivateKeyForVersion(ctx context.Context, storage logical.Storage, keyVersion int, key []byte) error { - keyEntry, err := p.safeGetKeyEntry(keyVersion) - if err != nil { - return err - } - - // Parse key - parsedPrivateKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - if strings.Contains(err.Error(), "unknown elliptic curve") { - var edErr error - parsedPrivateKey, edErr = ParsePKCS8Ed25519PrivateKey(key) - if edErr != nil { - return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) - } - - // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! - } else if strings.Contains(err.Error(), oidSignatureRSAPSS.String()) { - var rsaErr error - parsedPrivateKey, rsaErr = ParsePKCS8RSAPSSPrivateKey(key) - if rsaErr != nil { - return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an RSA/PSS private key: %v\n - original error: %w", rsaErr, err) - } - - // Parsing as RSA-PSS in PKCS8 succeeded! 
- } else { - return fmt.Errorf("error parsing asymmetric key: %s", err) - } - } - - switch parsedPrivateKey.(type) { - case *ecdsa.PrivateKey: - ecdsaKey := parsedPrivateKey.(*ecdsa.PrivateKey) - pemBlock, _ := pem.Decode([]byte(keyEntry.FormattedPublicKey)) - publicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil || publicKey == nil { - return fmt.Errorf("failed to parse key entry public key: %v", err) - } - if !publicKey.(*ecdsa.PublicKey).Equal(&ecdsaKey.PublicKey) { - return fmt.Errorf("cannot import key, key pair does not match") - } - case *rsa.PrivateKey: - rsaKey := parsedPrivateKey.(*rsa.PrivateKey) - if !rsaKey.PublicKey.Equal(keyEntry.RSAPublicKey) { - return fmt.Errorf("cannot import key, key pair does not match") - } - case ed25519.PrivateKey: - ed25519Key := parsedPrivateKey.(ed25519.PrivateKey) - publicKey, err := base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey) - if err != nil { - return fmt.Errorf("failed to parse key entry public key: %v", err) - } - if !ed25519.PublicKey(publicKey).Equal(ed25519Key.Public()) { - return fmt.Errorf("cannot import key, key pair does not match") - } - } - - err = keyEntry.parseFromKey(p.Type, parsedPrivateKey) - if err != nil { - return err - } - - p.Keys[strconv.Itoa(keyVersion)] = keyEntry - - return p.Persist(ctx, storage) -} - -func (ke *KeyEntry) parseFromKey(PolKeyType KeyType, parsedKey any) error { - switch parsedKey.(type) { - case *ecdsa.PrivateKey, *ecdsa.PublicKey: - if PolKeyType != KeyType_ECDSA_P256 && PolKeyType != KeyType_ECDSA_P384 && PolKeyType != KeyType_ECDSA_P521 { - return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) - } - - curve := elliptic.P256() - if PolKeyType == KeyType_ECDSA_P384 { - curve = elliptic.P384() - } else if PolKeyType == KeyType_ECDSA_P521 { - curve = elliptic.P521() - } - - var derBytes []byte - var err error - ecdsaKey, ok := parsedKey.(*ecdsa.PrivateKey) - if ok { - - if ecdsaKey.Curve != curve { - return 
fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) - } - - ke.EC_D = ecdsaKey.D - ke.EC_X = ecdsaKey.X - ke.EC_Y = ecdsaKey.Y - - derBytes, err = x509.MarshalPKIXPublicKey(ecdsaKey.Public()) - if err != nil { - return errwrap.Wrapf("error marshaling public key: {{err}}", err) - } - } else { - ecdsaKey := parsedKey.(*ecdsa.PublicKey) - - if ecdsaKey.Curve != curve { - return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) - } - - ke.EC_X = ecdsaKey.X - ke.EC_Y = ecdsaKey.Y - - derBytes, err = x509.MarshalPKIXPublicKey(ecdsaKey) - if err != nil { - return errwrap.Wrapf("error marshaling public key: {{err}}", err) - } - } - - pemBlock := &pem.Block{ - Type: "PUBLIC KEY", - Bytes: derBytes, - } - pemBytes := pem.EncodeToMemory(pemBlock) - if pemBytes == nil || len(pemBytes) == 0 { - return fmt.Errorf("error PEM-encoding public key") - } - ke.FormattedPublicKey = string(pemBytes) - case ed25519.PrivateKey, ed25519.PublicKey: - if PolKeyType != KeyType_ED25519 { - return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) - } - - privateKey, ok := parsedKey.(ed25519.PrivateKey) - if ok { - ke.Key = privateKey - publicKey := privateKey.Public().(ed25519.PublicKey) - ke.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) - } else { - publicKey := parsedKey.(ed25519.PublicKey) - ke.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) - } - case *rsa.PrivateKey, *rsa.PublicKey: - if PolKeyType != KeyType_RSA2048 && PolKeyType != KeyType_RSA3072 && PolKeyType != KeyType_RSA4096 { - return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) - } - - keyBytes := 256 - if PolKeyType == KeyType_RSA3072 { - keyBytes = 384 - } else if PolKeyType == KeyType_RSA4096 { - keyBytes = 512 - } - - rsaKey, ok := parsedKey.(*rsa.PrivateKey) - if ok { - if rsaKey.Size() != keyBytes { - return fmt.Errorf("invalid key size: 
expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) - } - ke.RSAKey = rsaKey - ke.RSAPublicKey = rsaKey.Public().(*rsa.PublicKey) - } else { - rsaKey := parsedKey.(*rsa.PublicKey) - if rsaKey.Size() != keyBytes { - return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) - } - ke.RSAPublicKey = rsaKey - } - default: - return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) - } - - return nil -} - -func (p *Policy) WrapKey(ver int, targetKey interface{}, targetKeyType KeyType, hash hash.Hash) (string, error) { - if !p.Type.SigningSupported() { - return "", fmt.Errorf("message signing not supported for key type %v", p.Type) - } - - switch { - case ver == 0: - ver = p.LatestVersion - case ver < 0: - return "", errutil.UserError{Err: "requested version for key wrapping is negative"} - case ver > p.LatestVersion: - return "", errutil.UserError{Err: "requested version for key wrapping is higher than the latest key version"} - case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: - return "", errutil.UserError{Err: "requested version for key wrapping is less than the minimum encryption key version"} - } - - keyEntry, err := p.safeGetKeyEntry(ver) - if err != nil { - return "", err - } - - return keyEntry.WrapKey(targetKey, targetKeyType, hash) -} - -func (ke *KeyEntry) WrapKey(targetKey interface{}, targetKeyType KeyType, hash hash.Hash) (string, error) { - // Presently this method implements a CKM_RSA_AES_KEY_WRAP-compatible - // wrapping interface and only works on RSA keyEntries as a result. 
- if ke.RSAPublicKey == nil { - return "", fmt.Errorf("unsupported key type in use; must be a rsa key") - } - - var preppedTargetKey []byte - switch targetKeyType { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_HMAC: - var ok bool - preppedTargetKey, ok = targetKey.([]byte) - if !ok { - return "", fmt.Errorf("failed to wrap target key for import: symmetric key not provided in byte format (%T)", targetKey) - } - default: - var err error - preppedTargetKey, err = x509.MarshalPKCS8PrivateKey(targetKey) - if err != nil { - return "", fmt.Errorf("failed to wrap target key for import: %w", err) - } - } - - result, err := wrapTargetPKCS8ForImport(ke.RSAPublicKey, preppedTargetKey, hash) - if err != nil { - return result, fmt.Errorf("failed to wrap target key for import: %w", err) - } - - return result, nil -} - -func wrapTargetPKCS8ForImport(wrappingKey *rsa.PublicKey, preppedTargetKey []byte, hash hash.Hash) (string, error) { - // Generate an ephemeral AES-256 key - ephKey, err := uuid.GenerateRandomBytes(32) - if err != nil { - return "", fmt.Errorf("failed to generate an ephemeral AES wrapping key: %w", err) - } - - // Wrap ephemeral AES key with public wrapping key - ephKeyWrapped, err := rsa.EncryptOAEP(hash, rand.Reader, wrappingKey, ephKey, []byte{} /* label */) - if err != nil { - return "", fmt.Errorf("failed to encrypt ephemeral wrapping key with public key: %w", err) - } - - // Create KWP instance for wrapping target key - kwp, err := subtle.NewKWP(ephKey) - if err != nil { - return "", fmt.Errorf("failed to generate new KWP from AES key: %w", err) - } - - // Wrap target key with KWP - targetKeyWrapped, err := kwp.Wrap(preppedTargetKey) - if err != nil { - return "", fmt.Errorf("failed to wrap target key with KWP: %w", err) - } - - // Combined wrapped keys into a single blob and base64 encode - wrappedKeys := append(ephKeyWrapped, targetKeyWrapped...) 
- return base64.StdEncoding.EncodeToString(wrappedKeys), nil -} diff --git a/sdk/helper/keysutil/policy_test.go b/sdk/helper/keysutil/policy_test.go index f5e4d35eb81c4..91767cfd9d398 100644 --- a/sdk/helper/keysutil/policy_test.go +++ b/sdk/helper/keysutil/policy_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package keysutil import ( @@ -846,7 +843,7 @@ func Test_RSA_PSS(t *testing.T) { } cryptoHash := CryptoHashMap[hashType] minSaltLength := p.minRSAPSSSaltLength() - maxSaltLength := p.maxRSAPSSSaltLength(rsaKey.N.BitLen(), cryptoHash) + maxSaltLength := p.maxRSAPSSSaltLength(rsaKey, cryptoHash) hash := cryptoHash.New() hash.Write(input) input = hash.Sum(nil) @@ -956,9 +953,6 @@ func Test_RSA_PSS(t *testing.T) { // 2. For each hash algorithm... for hashAlgorithm, hashType := range HashTypeMap { t.Log(tabs[1], "Hash algorithm:", hashAlgorithm) - if hashAlgorithm == "none" { - continue - } // 3. For each marshaling type... for marshalingName, marshalingType := range MarshalingTypeMap { @@ -970,92 +964,6 @@ func Test_RSA_PSS(t *testing.T) { } } -func Test_RSA_PKCS1(t *testing.T) { - t.Log("Testing RSA PKCS#1v1.5") - - ctx := context.Background() - storage := &logical.InmemStorage{} - // https://crypto.stackexchange.com/a/1222 - input := []byte("Sphinx of black quartz, judge my vow") - sigAlgorithm := "pkcs1v15" - - tabs := make(map[int]string) - for i := 1; i <= 6; i++ { - tabs[i] = strings.Repeat("\t", i) - } - - test_RSA_PKCS1 := func(t *testing.T, p *Policy, rsaKey *rsa.PrivateKey, hashType HashType, - marshalingType MarshalingType, - ) { - unsaltedOptions := SigningOptions{ - HashAlgorithm: hashType, - Marshaling: marshalingType, - SigAlgorithm: sigAlgorithm, - } - cryptoHash := CryptoHashMap[hashType] - - // PKCS#1v1.5 NoOID uses a direct input and assumes it is pre-hashed. - if hashType != 0 { - hash := cryptoHash.New() - hash.Write(input) - input = hash.Sum(nil) - } - - // 1. 
Make a signature with the given key size and hash algorithm. - t.Log(tabs[3], "Make an automatic signature") - sig, err := p.Sign(0, nil, input, hashType, sigAlgorithm, marshalingType) - if err != nil { - // A bit of a hack but FIPS go does not support some hash types - if isUnsupportedGoHashType(hashType, err) { - t.Skip(tabs[4], "skipping test as FIPS Go does not support hash type") - return - } - t.Fatal(tabs[4], "❌ Failed to automatically sign:", err) - } - - // 1.1 Verify this signature using the *inferred* salt length. - autoVerify(4, t, p, input, sig, unsaltedOptions) - } - - rsaKeyTypes := []KeyType{KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096} - testKeys, err := generateTestKeys() - if err != nil { - t.Fatalf("error generating test keys: %s", err) - } - - // 1. For each standard RSA key size 2048, 3072, and 4096... - for _, rsaKeyType := range rsaKeyTypes { - t.Log("Key size: ", rsaKeyType) - p := &Policy{ - Name: fmt.Sprint(rsaKeyType), // NOTE: crucial to create a new key per key size - Type: rsaKeyType, - } - - rsaKeyBytes := testKeys[rsaKeyType] - err := p.Import(ctx, storage, rsaKeyBytes, rand.Reader) - if err != nil { - t.Fatal(tabs[1], "❌ Failed to import key:", err) - } - rsaKeyAny, err := x509.ParsePKCS8PrivateKey(rsaKeyBytes) - if err != nil { - t.Fatalf("error parsing test keys: %s", err) - } - rsaKey := rsaKeyAny.(*rsa.PrivateKey) - - // 2. For each hash algorithm... - for hashAlgorithm, hashType := range HashTypeMap { - t.Log(tabs[1], "Hash algorithm:", hashAlgorithm) - - // 3. For each marshaling type... 
- for marshalingName, marshalingType := range MarshalingTypeMap { - t.Log(tabs[2], "Marshaling type:", marshalingName) - testName := fmt.Sprintf("%s-%s-%s", rsaKeyType, hashAlgorithm, marshalingName) - t.Run(testName, func(t *testing.T) { test_RSA_PKCS1(t, p, rsaKey, hashType, marshalingType) }) - } - } - } -} - // Normal Go builds support all the hash functions for RSA_PSS signatures but the // FIPS Go build does not support at this time the SHA3 hashes as FIPS 140_2 does // not accept them. diff --git a/sdk/helper/keysutil/transit_lru.go b/sdk/helper/keysutil/transit_lru.go index 66ea66dc74e23..cd1f6dafe693e 100644 --- a/sdk/helper/keysutil/transit_lru.go +++ b/sdk/helper/keysutil/transit_lru.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package keysutil import lru "github.com/hashicorp/golang-lru" diff --git a/sdk/helper/keysutil/transit_syncmap.go b/sdk/helper/keysutil/transit_syncmap.go index fddcf706b2f12..ce9071380a996 100644 --- a/sdk/helper/keysutil/transit_syncmap.go +++ b/sdk/helper/keysutil/transit_syncmap.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package keysutil import ( diff --git a/sdk/helper/keysutil/util.go b/sdk/helper/keysutil/util.go index 94a56d42c5735..063af5914672d 100644 --- a/sdk/helper/keysutil/util.go +++ b/sdk/helper/keysutil/util.go @@ -1,10 +1,6 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package keysutil import ( - "crypto/x509" "crypto/x509/pkix" "encoding/asn1" "errors" @@ -57,9 +53,6 @@ var ( // Other implementations may use the OID 1.3.101.110 from // https://datatracker.ietf.org/doc/html/rfc8410. oidRFC8410Ed25519 = asn1.ObjectIdentifier{1, 3, 101, 110} - - // See crypto/x509/x509.go in the Go toolchain source distribution. 
- oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} ) func isEd25519OID(oid asn1.ObjectIdentifier) bool { @@ -120,32 +113,3 @@ func ParsePKCS8Ed25519PrivateKey(der []byte) (key interface{}, err error) { return ed25519.NewKeyFromSeed(ed25519Key.PrivateKey), nil } - -// ParsePKCS8PrivateKey parses an unencrypted private key in PKCS #8, ASN.1 DER form. -// -// This helper only supports RSA/PSS keys (with OID 1.2.840.113549.1.1.10). -// -// It returns a *rsa.PrivateKey, a *ecdsa.PrivateKey, or a ed25519.PrivateKey. -// More types might be supported in the future. -// -// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY". -func ParsePKCS8RSAPSSPrivateKey(der []byte) (key interface{}, err error) { - var privKey pkcs8 - if _, err := asn1.Unmarshal(der, &privKey); err == nil { - switch { - case privKey.Algo.Algorithm.Equal(oidSignatureRSAPSS): - // Fall through; there's no parameters here unlike ECDSA - // containers, so we can go to parsing the inner rsaPrivateKey - // object. - default: - return nil, errors.New("keysutil: failed to parse key as RSA PSS private key") - } - } - - key, err = x509.ParsePKCS1PrivateKey(privKey.PrivateKey) - if err != nil { - return nil, fmt.Errorf("keysutil: failed to parse inner RSA PSS private key: %w", err) - } - - return key, nil -} diff --git a/sdk/helper/ldaputil/client.go b/sdk/helper/ldaputil/client.go index c3562a5d406df..bbdca9a4e6a8b 100644 --- a/sdk/helper/ldaputil/client.go +++ b/sdk/helper/ldaputil/client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ldaputil import ( @@ -139,11 +136,10 @@ func (c *Client) makeLdapSearchRequest(cfg *ConfigEntry, conn Connection, userna c.Logger.Debug("discovering user", "userdn", cfg.UserDN, "filter", renderedFilter) } ldapRequest := &ldap.SearchRequest{ - BaseDN: cfg.UserDN, - DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], - Scope: ldap.ScopeWholeSubtree, - Filter: renderedFilter, - SizeLimit: 2, // Should be only 1 result. Any number larger (2 or more) means access denied. + BaseDN: cfg.UserDN, + Scope: ldap.ScopeWholeSubtree, + Filter: renderedFilter, + SizeLimit: 2, // Should be only 1 result. Any number larger (2 or more) means access denied. Attributes: []string{ cfg.UserAttr, // Return only needed attributes }, @@ -230,10 +226,6 @@ func (c *Client) RenderUserSearchFilter(cfg *ConfigEntry, username string) (stri context.Username = fmt.Sprintf("%s@%s", EscapeLDAPValue(username), cfg.UPNDomain) } - // Execute the template. Note that the template context contains escaped input and does - // not provide behavior via functions. Additionally, no function map has been provided - // during template initialization. 
The only template functions available during execution - // are the predefined global functions: https://pkg.go.dev/text/template#hdr-Functions var renderedFilter bytes.Buffer if err := t.Execute(&renderedFilter, context); err != nil { return "", fmt.Errorf("LDAP search failed due to template parsing error: %w", err) @@ -299,11 +291,10 @@ func (c *Client) GetUserDN(cfg *ConfigEntry, conn Connection, bindDN, username s c.Logger.Debug("searching upn", "userdn", cfg.UserDN, "filter", filter) } result, err := conn.Search(&ldap.SearchRequest{ - BaseDN: cfg.UserDN, - Scope: ldap.ScopeWholeSubtree, - DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], - Filter: filter, - SizeLimit: math.MaxInt32, + BaseDN: cfg.UserDN, + Scope: ldap.ScopeWholeSubtree, + Filter: filter, + SizeLimit: math.MaxInt32, }) if err != nil { return userDN, fmt.Errorf("LDAP search failed for detecting user: %w", err) @@ -361,10 +352,9 @@ func (c *Client) performLdapFilterGroupsSearch(cfg *ConfigEntry, conn Connection } result, err := conn.Search(&ldap.SearchRequest{ - BaseDN: cfg.GroupDN, - Scope: ldap.ScopeWholeSubtree, - DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], - Filter: renderedQuery.String(), + BaseDN: cfg.GroupDN, + Scope: ldap.ScopeWholeSubtree, + Filter: renderedQuery.String(), Attributes: []string{ cfg.GroupAttr, }, @@ -410,10 +400,6 @@ func (c *Client) performLdapFilterGroupsSearchPaging(cfg *ConfigEntry, conn Pagi ldap.EscapeFilter(username), } - // Execute the template. Note that the template context contains escaped input and does - // not provide behavior via functions. Additionally, no function map has been provided - // during template initialization. 
The only template functions available during execution - // are the predefined global functions: https://pkg.go.dev/text/template#hdr-Functions var renderedQuery bytes.Buffer if err := t.Execute(&renderedQuery, context); err != nil { return nil, fmt.Errorf("LDAP search failed due to template parsing error: %w", err) @@ -424,10 +410,9 @@ func (c *Client) performLdapFilterGroupsSearchPaging(cfg *ConfigEntry, conn Pagi } result, err := conn.SearchWithPaging(&ldap.SearchRequest{ - BaseDN: cfg.GroupDN, - Scope: ldap.ScopeWholeSubtree, - DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], - Filter: renderedQuery.String(), + BaseDN: cfg.GroupDN, + Scope: ldap.ScopeWholeSubtree, + Filter: renderedQuery.String(), Attributes: []string{ cfg.GroupAttr, }, @@ -474,10 +459,9 @@ func sidBytesToString(b []byte) (string, error) { func (c *Client) performLdapTokenGroupsSearch(cfg *ConfigEntry, conn Connection, userDN string) ([]*ldap.Entry, error) { result, err := conn.Search(&ldap.SearchRequest{ - BaseDN: userDN, - Scope: ldap.ScopeBaseObject, - DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], - Filter: "(objectClass=*)", + BaseDN: userDN, + Scope: ldap.ScopeBaseObject, + Filter: "(objectClass=*)", Attributes: []string{ "tokenGroups", }, @@ -503,10 +487,9 @@ func (c *Client) performLdapTokenGroupsSearch(cfg *ConfigEntry, conn Connection, } groupResult, err := conn.Search(&ldap.SearchRequest{ - BaseDN: fmt.Sprintf("", sidString), - Scope: ldap.ScopeBaseObject, - DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], - Filter: "(objectClass=*)", + BaseDN: fmt.Sprintf("", sidString), + Scope: ldap.ScopeBaseObject, + Filter: "(objectClass=*)", Attributes: []string{ "1.1", // RFC no attributes }, diff --git a/sdk/helper/ldaputil/client_test.go b/sdk/helper/ldaputil/client_test.go index 167d50f22d628..c9ae9cd4baa5a 100644 --- a/sdk/helper/ldaputil/client_test.go +++ b/sdk/helper/ldaputil/client_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ldaputil import ( diff --git a/sdk/helper/ldaputil/config.go b/sdk/helper/ldaputil/config.go index dfa34daddfd08..041ed0704ae05 100644 --- a/sdk/helper/ldaputil/config.go +++ b/sdk/helper/ldaputil/config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ldaputil import ( @@ -16,17 +13,8 @@ import ( "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/errwrap" - - "github.com/go-ldap/ldap/v3" ) -var ldapDerefAliasMap = map[string]int{ - "never": ldap.NeverDerefAliases, - "finding": ldap.DerefFindingBaseObj, - "searching": ldap.DerefInSearching, - "always": ldap.DerefAlways, -} - // ConfigFields returns all the config fields that can potentially be used by the LDAP client. // Not all fields will be used by every integration. func ConfigFields() map[string]*framework.FieldSchema { @@ -245,13 +233,6 @@ Default: ({{.UserAttr}}={{.Username}})`, Default: "30s", }, - "dereference_aliases": { - Type: framework.TypeString, - Description: "When aliases should be dereferenced on search operations. Accepted values are 'never', 'finding', 'searching', 'always'. Defaults to 'never'.", - Default: "never", - AllowedValues: []interface{}{"never", "finding", "searching", "always"}, - }, - "max_page_size": { Type: framework.TypeInt, Description: "If set to a value greater than 0, the LDAP backend will use the LDAP server's paged search control to request pages of up to the given size. This can be used to avoid hitting the LDAP server's maximum result size limit. 
Otherwise, the LDAP backend will not use the paged search control.", @@ -427,10 +408,6 @@ func NewConfigEntry(existing *ConfigEntry, d *framework.FieldData) (*ConfigEntry cfg.ConnectionTimeout = d.Get("connection_timeout").(int) } - if _, ok := d.Raw["dereference_aliases"]; ok || !hadExisting { - cfg.DerefAliases = d.Get("dereference_aliases").(string) - } - if _, ok := d.Raw["max_page_size"]; ok || !hadExisting { cfg.MaximumPageSize = d.Get("max_page_size").(int) } @@ -462,7 +439,6 @@ type ConfigEntry struct { UsePre111GroupCNBehavior *bool `json:"use_pre111_group_cn_behavior"` RequestTimeout int `json:"request_timeout"` ConnectionTimeout int `json:"connection_timeout"` - DerefAliases string `json:"dereference_aliases"` MaximumPageSize int `json:"max_page_size"` // These json tags deviate from snake case because there was a past issue @@ -503,7 +479,6 @@ func (c *ConfigEntry) PasswordlessMap() map[string]interface{} { "request_timeout": c.RequestTimeout, "connection_timeout": c.ConnectionTimeout, "username_as_alias": c.UsernameAsAlias, - "dereference_aliases": c.DerefAliases, "max_page_size": c.MaximumPageSize, } if c.CaseSensitiveNames != nil { diff --git a/sdk/helper/ldaputil/config_test.go b/sdk/helper/ldaputil/config_test.go index b7fd22ccbb2df..c0a57253ce7df 100644 --- a/sdk/helper/ldaputil/config_test.go +++ b/sdk/helper/ldaputil/config_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ldaputil import ( @@ -174,7 +171,6 @@ var jsonConfigDefault = []byte(` "username_as_alias": false, "request_timeout": 90, "connection_timeout": 30, - "dereference_aliases": "never", "max_page_size": 0, "CaseSensitiveNames": false, "ClientTLSCert": "", diff --git a/sdk/helper/ldaputil/connection.go b/sdk/helper/ldaputil/connection.go index c33ad403f78e2..71c83f2f9b3a9 100644 --- a/sdk/helper/ldaputil/connection.go +++ b/sdk/helper/ldaputil/connection.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package ldaputil import ( diff --git a/sdk/helper/ldaputil/ldap.go b/sdk/helper/ldaputil/ldap.go index bdf746e5c8cd4..73e36b230dc0e 100644 --- a/sdk/helper/ldaputil/ldap.go +++ b/sdk/helper/ldaputil/ldap.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package ldaputil import ( diff --git a/sdk/helper/license/feature.go b/sdk/helper/license/feature.go index b42fcd1fc1a19..c7c000a58a30d 100644 --- a/sdk/helper/license/feature.go +++ b/sdk/helper/license/feature.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package license // Features is a bitmask of feature flags diff --git a/sdk/helper/locksutil/locks.go b/sdk/helper/locksutil/locks.go index c7538b63b4f78..35ffcf739d9dc 100644 --- a/sdk/helper/locksutil/locks.go +++ b/sdk/helper/locksutil/locks.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package locksutil import ( diff --git a/sdk/helper/locksutil/locks_test.go b/sdk/helper/locksutil/locks_test.go index 954a46349ea7e..9916644637779 100644 --- a/sdk/helper/locksutil/locks_test.go +++ b/sdk/helper/locksutil/locks_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package locksutil import "testing" diff --git a/sdk/helper/logging/logging.go b/sdk/helper/logging/logging.go index 37dcefa477836..211a545e33601 100644 --- a/sdk/helper/logging/logging.go +++ b/sdk/helper/logging/logging.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logging import ( @@ -63,13 +60,16 @@ func ParseLogFormat(format string) (LogFormat, error) { case "json": return JSONFormat, nil default: - return UnspecifiedFormat, fmt.Errorf("unknown log format: %s", format) + return UnspecifiedFormat, fmt.Errorf("Unknown log format: %s", format) } } // ParseEnvLogFormat parses the log format from an environment variable. 
func ParseEnvLogFormat() LogFormat { logFormat := os.Getenv("VAULT_LOG_FORMAT") + if logFormat == "" { + logFormat = os.Getenv("LOGXI_FORMAT") + } switch strings.ToLower(logFormat) { case "json", "vault_json", "vault-json", "vaultjson": return JSONFormat diff --git a/sdk/helper/logging/logging_test.go b/sdk/helper/logging/logging_test.go index 16075524b0b8c..3a860d199c820 100644 --- a/sdk/helper/logging/logging_test.go +++ b/sdk/helper/logging/logging_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logging import ( @@ -24,7 +21,7 @@ func Test_ParseLogFormat(t *testing.T) { {format: "STANDARD", expected: StandardFormat, expectedErr: nil}, {format: "json", expected: JSONFormat, expectedErr: nil}, {format: " json ", expected: JSONFormat, expectedErr: nil}, - {format: "bogus", expected: UnspecifiedFormat, expectedErr: errors.New("unknown log format: bogus")}, + {format: "bogus", expected: UnspecifiedFormat, expectedErr: errors.New("Unknown log format: bogus")}, } for _, test := range tests { @@ -45,6 +42,17 @@ func Test_ParseEnv_VAULT_LOG_FORMAT(t *testing.T) { testParseEnvLogFormat(t, "VAULT_LOG_FORMAT") } +func Test_ParseEnv_LOGXI_FORMAT(t *testing.T) { + oldVLF := os.Getenv("VAULT_LOG_FORMAT") + defer os.Setenv("VAULT_LOG_FORMAT", oldVLF) + + oldLogxi := os.Getenv("LOGXI_FORMAT") + defer os.Setenv("LOGXI_FORMAT", oldLogxi) + + os.Setenv("VAULT_LOG_FORMAT", "") + testParseEnvLogFormat(t, "LOGXI_FORMAT") +} + func testParseEnvLogFormat(t *testing.T, name string) { env := []string{ "json", "vauLT_Json", "VAULT-JSON", "vaulTJSon", diff --git a/sdk/helper/mlock/mlock.go b/sdk/helper/mlock/mlock.go index 5820d15af3c1d..1bbf8a0bbbf83 100644 --- a/sdk/helper/mlock/mlock.go +++ b/sdk/helper/mlock/mlock.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package mlock diff --git a/sdk/helper/ocsp/client.go b/sdk/helper/ocsp/client.go deleted file mode 100644 index c1b9c2fbc37a7..0000000000000 --- a/sdk/helper/ocsp/client.go +++ /dev/null @@ -1,1165 +0,0 @@ -// Copyright (c) 2017-2022 Snowflake Computing Inc. All rights reserved. - -package ocsp - -import ( - "bytes" - "context" - "crypto" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "errors" - "fmt" - "io" - "math/big" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-retryablehttp" - lru "github.com/hashicorp/golang-lru" - "github.com/hashicorp/vault/sdk/helper/certutil" - "golang.org/x/crypto/ocsp" -) - -// FailOpenMode is OCSP fail open mode. FailOpenTrue by default and may -// set to ocspModeFailClosed for fail closed mode -type FailOpenMode uint32 - -type requestFunc func(method, urlStr string, body interface{}) (*retryablehttp.Request, error) - -type clientInterface interface { - Do(req *retryablehttp.Request) (*http.Response, error) -} - -const ( - httpHeaderContentType = "Content-Type" - httpHeaderAccept = "accept" - httpHeaderContentLength = "Content-Length" - httpHeaderHost = "Host" - ocspRequestContentType = "application/ocsp-request" - ocspResponseContentType = "application/ocsp-response" -) - -const ( - ocspFailOpenNotSet FailOpenMode = iota - // FailOpenTrue represents OCSP fail open mode. - FailOpenTrue - // FailOpenFalse represents OCSP fail closed mode. - FailOpenFalse -) - -const ( - ocspModeFailOpen = "FAIL_OPEN" - ocspModeFailClosed = "FAIL_CLOSED" - ocspModeInsecure = "INSECURE" -) - -const ocspCacheKey = "ocsp_cache" - -const ( - // defaultOCSPResponderTimeout is the total timeout for OCSP responder. 
- defaultOCSPResponderTimeout = 10 * time.Second -) - -const ( - // cacheExpire specifies cache data expiration time in seconds. - cacheExpire = float64(24 * 60 * 60) -) - -type ocspCachedResponse struct { - time float64 - producedAt float64 - thisUpdate float64 - nextUpdate float64 - status ocspStatusCode -} - -type Client struct { - // caRoot includes the CA certificates. - caRoot map[string]*x509.Certificate - // certPool includes the CA certificates. - certPool *x509.CertPool - ocspResponseCache *lru.TwoQueueCache - ocspResponseCacheLock sync.RWMutex - // cacheUpdated is true if the memory cache is updated - cacheUpdated bool - logFactory func() hclog.Logger -} - -type ocspStatusCode int - -type ocspStatus struct { - code ocspStatusCode - err error -} - -const ( - ocspSuccess ocspStatusCode = 0 - ocspStatusGood ocspStatusCode = -1 - ocspStatusRevoked ocspStatusCode = -2 - ocspStatusUnknown ocspStatusCode = -3 - ocspStatusOthers ocspStatusCode = -4 - ocspFailedDecomposeRequest ocspStatusCode = -5 - ocspInvalidValidity ocspStatusCode = -6 - ocspMissedCache ocspStatusCode = -7 - ocspCacheExpired ocspStatusCode = -8 -) - -// copied from crypto/ocsp.go -type certID struct { - HashAlgorithm pkix.AlgorithmIdentifier - NameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// cache key -type certIDKey struct { - NameHash string - IssuerKeyHash string - SerialNumber string -} - -// copied from crypto/ocsp -var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ - crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), - crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), - crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), - crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), -} - -// copied from crypto/ocsp -func getOIDFromHashAlgorithm(target crypto.Hash) (asn1.ObjectIdentifier, error) { - for hash, oid := range hashOIDs { - if hash == target { - return oid, nil - } - } - 
return nil, fmt.Errorf("no valid OID is found for the hash algorithm: %v", target) -} - -func (c *Client) ClearCache() { - c.ocspResponseCache.Purge() -} - -func (c *Client) getHashAlgorithmFromOID(target pkix.AlgorithmIdentifier) crypto.Hash { - for hash, oid := range hashOIDs { - if oid.Equal(target.Algorithm) { - return hash - } - } - // no valid hash algorithm is found for the oid. Falling back to SHA1 - return crypto.SHA1 -} - -// isInValidityRange checks the validity -func isInValidityRange(currTime, nextUpdate time.Time) bool { - return !nextUpdate.IsZero() && !currTime.After(nextUpdate) -} - -func extractCertIDKeyFromRequest(ocspReq []byte) (*certIDKey, *ocspStatus) { - r, err := ocsp.ParseRequest(ocspReq) - if err != nil { - return nil, &ocspStatus{ - code: ocspFailedDecomposeRequest, - err: err, - } - } - - // encode CertID, used as a key in the cache - encodedCertID := &certIDKey{ - base64.StdEncoding.EncodeToString(r.IssuerNameHash), - base64.StdEncoding.EncodeToString(r.IssuerKeyHash), - r.SerialNumber.String(), - } - return encodedCertID, &ocspStatus{ - code: ocspSuccess, - } -} - -func (c *Client) encodeCertIDKey(certIDKeyBase64 string) (*certIDKey, error) { - r, err := base64.StdEncoding.DecodeString(certIDKeyBase64) - if err != nil { - return nil, err - } - var cid certID - rest, err := asn1.Unmarshal(r, &cid) - if err != nil { - // error in parsing - return nil, err - } - if len(rest) > 0 { - // extra bytes to the end - return nil, err - } - return &certIDKey{ - base64.StdEncoding.EncodeToString(cid.NameHash), - base64.StdEncoding.EncodeToString(cid.IssuerKeyHash), - cid.SerialNumber.String(), - }, nil -} - -func (c *Client) checkOCSPResponseCache(encodedCertID *certIDKey, subject, issuer *x509.Certificate) (*ocspStatus, error) { - c.ocspResponseCacheLock.RLock() - var cacheValue *ocspCachedResponse - v, ok := c.ocspResponseCache.Get(*encodedCertID) - if ok { - cacheValue = v.(*ocspCachedResponse) - } - c.ocspResponseCacheLock.RUnlock() - - 
status, err := c.extractOCSPCacheResponseValue(cacheValue, subject, issuer) - if err != nil { - return nil, err - } - if !isValidOCSPStatus(status.code) { - c.deleteOCSPCache(encodedCertID) - } - return status, err -} - -func (c *Client) deleteOCSPCache(encodedCertID *certIDKey) { - c.ocspResponseCacheLock.Lock() - c.ocspResponseCache.Remove(*encodedCertID) - c.cacheUpdated = true - c.ocspResponseCacheLock.Unlock() -} - -func validateOCSP(ocspRes *ocsp.Response) (*ocspStatus, error) { - curTime := time.Now() - - if ocspRes == nil { - return nil, errors.New("OCSP Response is nil") - } - if !isInValidityRange(curTime, ocspRes.NextUpdate) { - return &ocspStatus{ - code: ocspInvalidValidity, - err: fmt.Errorf("invalid validity: producedAt: %v, thisUpdate: %v, nextUpdate: %v", ocspRes.ProducedAt, ocspRes.ThisUpdate, ocspRes.NextUpdate), - }, nil - } - return returnOCSPStatus(ocspRes), nil -} - -func returnOCSPStatus(ocspRes *ocsp.Response) *ocspStatus { - switch ocspRes.Status { - case ocsp.Good: - return &ocspStatus{ - code: ocspStatusGood, - err: nil, - } - case ocsp.Revoked: - return &ocspStatus{ - code: ocspStatusRevoked, - } - case ocsp.Unknown: - return &ocspStatus{ - code: ocspStatusUnknown, - err: errors.New("OCSP status unknown."), - } - default: - return &ocspStatus{ - code: ocspStatusOthers, - err: fmt.Errorf("OCSP others. %v", ocspRes.Status), - } - } -} - -// retryOCSP is the second level of retry method if the returned contents are corrupted. It often happens with OCSP -// serer and retry helps. 
-func (c *Client) retryOCSP( - ctx context.Context, - client clientInterface, - req requestFunc, - ocspHost *url.URL, - headers map[string]string, - reqBody []byte, - issuer *x509.Certificate, -) (ocspRes *ocsp.Response, ocspResBytes []byte, ocspS *ocspStatus, retErr error) { - doRequest := func(request *retryablehttp.Request) (*http.Response, error) { - if request != nil { - request = request.WithContext(ctx) - for k, v := range headers { - request.Header[k] = append(request.Header[k], v) - } - } - res, err := client.Do(request) - if err != nil { - return nil, err - } - c.Logger().Debug("StatusCode from OCSP Server:", "statusCode", res.StatusCode) - return res, err - } - - for _, method := range []string{"GET", "POST"} { - reqUrl := *ocspHost - var body []byte - - switch method { - case "GET": - reqUrl.Path = reqUrl.Path + "/" + base64.StdEncoding.EncodeToString(reqBody) - case "POST": - body = reqBody - default: - // Programming error; all request/systems errors are multierror - // and appended. - return nil, nil, nil, fmt.Errorf("unknown request method: %v", method) - } - - var res *http.Response - request, err := req(method, reqUrl.String(), bytes.NewBuffer(body)) - if err != nil { - err = fmt.Errorf("error creating %v request: %w", method, err) - retErr = multierror.Append(retErr, err) - continue - } - if res, err = doRequest(request); err != nil { - err = fmt.Errorf("error doing %v request: %w", method, err) - retErr = multierror.Append(retErr, err) - continue - } else { - defer res.Body.Close() - } - - if res.StatusCode != http.StatusOK { - err = fmt.Errorf("HTTP code is not OK on %v request. %v: %v", method, res.StatusCode, res.Status) - retErr = multierror.Append(retErr, err) - continue - } - - ocspResBytes, err = io.ReadAll(res.Body) - if err != nil { - err = fmt.Errorf("error reading %v request body: %w", method, err) - retErr = multierror.Append(retErr, err) - continue - } - - // Reading an OCSP response shouldn't be fatal. 
A misconfigured - // endpoint might return invalid results for e.g., GET but return - // valid results for POST on retry. This could happen if e.g., the - // server responds with JSON. - ocspRes, err = ocsp.ParseResponse(ocspResBytes /*issuer = */, nil /* !!unsafe!! */) - if err != nil { - err = fmt.Errorf("error parsing %v OCSP response: %w", method, err) - retErr = multierror.Append(retErr, err) - continue - } - - // Above, we use the unsafe issuer=nil parameter to ocsp.ParseResponse - // because Go's library does the wrong thing. - // - // Here, we lack a full chain, but we know we trust the parent issuer, - // so if the Go library incorrectly discards useful certificates, we - // likely cannot verify this without passing through the full chain - // back to the root. - // - // Instead, take one of two paths: 1. if there is no certificate in - // the ocspRes, verify the OCSP response directly with our trusted - // issuer certificate, or 2. if there is a certificate, either verify - // it directly matches our trusted issuer certificate, or verify it - // is signed by our trusted issuer certificate. - // - // See also: https://github.com/golang/go/issues/59641 - // - // This addresses the !!unsafe!! behavior above. - if ocspRes.Certificate == nil { - if err := ocspRes.CheckSignatureFrom(issuer); err != nil { - err = fmt.Errorf("error directly verifying signature on %v OCSP response: %w", method, err) - retErr = multierror.Append(retErr, err) - continue - } - } else { - // Because we have at least one certificate here, we know that - // Go's ocsp library verified the signature from this certificate - // onto the response and it was valid. Now we need to know we trust - // this certificate. There's two ways we can do this: - // - // 1. Via confirming issuer == ocspRes.Certificate, or - // 2. Via confirming ocspRes.Certificate.CheckSignatureFrom(issuer). - if !bytes.Equal(issuer.Raw, ocspRes.Raw) { - // 1 must not hold, so 2 holds; verify the signature. 
- if err := ocspRes.Certificate.CheckSignatureFrom(issuer); err != nil { - err = fmt.Errorf("error checking chain of trust on %v OCSP response via %v failed: %w", method, issuer.Subject.String(), err) - retErr = multierror.Append(retErr, err) - continue - } - - // Verify the OCSP responder certificate is still valid and - // contains the required EKU since it is a delegated OCSP - // responder certificate. - if ocspRes.Certificate.NotAfter.Before(time.Now()) { - err := fmt.Errorf("error checking delegated OCSP responder on %v OCSP response: certificate has expired", method) - retErr = multierror.Append(retErr, err) - continue - } - haveEKU := false - for _, ku := range ocspRes.Certificate.ExtKeyUsage { - if ku == x509.ExtKeyUsageOCSPSigning { - haveEKU = true - break - } - } - if !haveEKU { - err := fmt.Errorf("error checking delegated OCSP responder on %v OCSP response: certificate lacks the OCSP Signing EKU", method) - retErr = multierror.Append(retErr, err) - continue - } - } - } - - // While we haven't validated the signature on the OCSP response, we - // got what we presume is a definitive answer and simply changing - // methods will likely not help us in that regard. Use this status - // to return without retrying another method, when it looks definitive. - // - // We don't accept ocsp.Unknown here: presumably, we could've hit a CDN - // with static mapping of request->responses, with a default "unknown" - // handler for everything else. By retrying here, we use POST, which - // could hit a live OCSP server with fresher data than the cached CDN. - if ocspRes.Status == ocsp.Good || ocspRes.Status == ocsp.Revoked { - break - } - - // Here, we didn't have a valid response. Even though we didn't get an - // error, we should inform the user that this (valid-looking) response - // wasn't utilized. 
- err = fmt.Errorf("fetched %v OCSP response of status %v; wanted either good (%v) or revoked (%v)", method, ocspRes.Status, ocsp.Good, ocsp.Revoked) - retErr = multierror.Append(retErr, err) - } - - if ocspRes != nil && ocspResBytes != nil { - // Clear retErr, because we have one parseable-but-maybe-not-quite-correct - // OCSP response. - retErr = nil - ocspS = &ocspStatus{ - code: ocspSuccess, - } - } - - return -} - -// GetRevocationStatus checks the certificate revocation status for subject using issuer certificate. -func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate, conf *VerifyConfig) (*ocspStatus, error) { - status, ocspReq, encodedCertID, err := c.validateWithCache(subject, issuer) - if err != nil { - return nil, err - } - if isValidOCSPStatus(status.code) { - return status, nil - } - if ocspReq == nil || encodedCertID == nil { - return status, nil - } - c.Logger().Debug("cache missed", "server", subject.OCSPServer) - if len(subject.OCSPServer) == 0 && len(conf.OcspServersOverride) == 0 { - return nil, fmt.Errorf("no OCSP responder URL: subject: %v", subject.Subject) - } - ocspHosts := subject.OCSPServer - if len(conf.OcspServersOverride) > 0 { - ocspHosts = conf.OcspServersOverride - } - - var wg sync.WaitGroup - - ocspStatuses := make([]*ocspStatus, len(ocspHosts)) - ocspResponses := make([]*ocsp.Response, len(ocspHosts)) - errors := make([]error, len(ocspHosts)) - - for i, ocspHost := range ocspHosts { - u, err := url.Parse(ocspHost) - if err != nil { - return nil, err - } - - hostname := u.Hostname() - - headers := make(map[string]string) - headers[httpHeaderContentType] = ocspRequestContentType - headers[httpHeaderAccept] = ocspResponseContentType - headers[httpHeaderContentLength] = strconv.Itoa(len(ocspReq)) - headers[httpHeaderHost] = hostname - timeout := defaultOCSPResponderTimeout - - ocspClient := retryablehttp.NewClient() - ocspClient.HTTPClient.Timeout = timeout - ocspClient.HTTPClient.Transport = 
newInsecureOcspTransport(conf.ExtraCas) - - doRequest := func() error { - if conf.QueryAllServers { - defer wg.Done() - } - ocspRes, _, ocspS, err := c.retryOCSP( - ctx, ocspClient, retryablehttp.NewRequest, u, headers, ocspReq, issuer) - ocspResponses[i] = ocspRes - if err != nil { - errors[i] = err - return err - } - if ocspS.code != ocspSuccess { - ocspStatuses[i] = ocspS - return nil - } - - ret, err := validateOCSP(ocspRes) - if err != nil { - errors[i] = err - return err - } - if isValidOCSPStatus(ret.code) { - ocspStatuses[i] = ret - } - return nil - } - if conf.QueryAllServers { - wg.Add(1) - go doRequest() - } else { - err = doRequest() - if err == nil { - break - } - } - } - if conf.QueryAllServers { - wg.Wait() - } - // Good by default - var ret *ocspStatus - ocspRes := ocspResponses[0] - var firstError error - for i := range ocspHosts { - if errors[i] != nil { - if firstError == nil { - firstError = errors[i] - } - } else if ocspStatuses[i] != nil { - switch ocspStatuses[i].code { - case ocspStatusRevoked: - ret = ocspStatuses[i] - ocspRes = ocspResponses[i] - break - case ocspStatusGood: - // Use this response only if we don't have a status already, or if what we have was unknown - if ret == nil || ret.code == ocspStatusUnknown { - ret = ocspStatuses[i] - ocspRes = ocspResponses[i] - } - case ocspStatusUnknown: - if ret == nil { - // We may want to use this as the overall result - ret = ocspStatuses[i] - ocspRes = ocspResponses[i] - } - } - } - } - - // If no server reported the cert revoked, but we did have an error, report it - if (ret == nil || ret.code == ocspStatusUnknown) && firstError != nil { - return nil, firstError - } - // otherwise ret should contain a response for the overall request - - if !isValidOCSPStatus(ret.code) { - return ret, nil - } - v := ocspCachedResponse{ - status: ret.code, - time: float64(time.Now().UTC().Unix()), - producedAt: float64(ocspRes.ProducedAt.UTC().Unix()), - thisUpdate: float64(ocspRes.ThisUpdate.UTC().Unix()), 
- nextUpdate: float64(ocspRes.NextUpdate.UTC().Unix()), - } - - c.ocspResponseCacheLock.Lock() - c.ocspResponseCache.Add(*encodedCertID, &v) - c.cacheUpdated = true - c.ocspResponseCacheLock.Unlock() - return ret, nil -} - -func isValidOCSPStatus(status ocspStatusCode) bool { - return status == ocspStatusGood || status == ocspStatusRevoked || status == ocspStatusUnknown -} - -type VerifyConfig struct { - OcspEnabled bool - ExtraCas []*x509.Certificate - OcspServersOverride []string - OcspFailureMode FailOpenMode - QueryAllServers bool -} - -// VerifyLeafCertificate verifies just the subject against it's direct issuer -func (c *Client) VerifyLeafCertificate(ctx context.Context, subject, issuer *x509.Certificate, conf *VerifyConfig) error { - results, err := c.GetRevocationStatus(ctx, subject, issuer, conf) - if err != nil { - return err - } - if results.code == ocspStatusGood { - return nil - } else { - serial := issuer.SerialNumber - serialHex := strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) - if results.code == ocspStatusRevoked { - return fmt.Errorf("certificate with serial number %s has been revoked", serialHex) - } else if conf.OcspFailureMode == FailOpenFalse { - return fmt.Errorf("unknown OCSP status for cert with serial number %s", strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":"))) - } else { - c.Logger().Warn("could not validate OCSP status for cert, but continuing in fail open mode", "serial", serialHex) - } - } - return nil -} - -// VerifyPeerCertificate verifies all of certificate revocation status -func (c *Client) VerifyPeerCertificate(ctx context.Context, verifiedChains [][]*x509.Certificate, conf *VerifyConfig) error { - for i := 0; i < len(verifiedChains); i++ { - // Certificate signed by Root CA. 
This should be one before the last in the Certificate Chain - numberOfNoneRootCerts := len(verifiedChains[i]) - 1 - if !verifiedChains[i][numberOfNoneRootCerts].IsCA || string(verifiedChains[i][numberOfNoneRootCerts].RawIssuer) != string(verifiedChains[i][numberOfNoneRootCerts].RawSubject) { - // Check if the last Non Root Cert is also a CA or is self signed. - // if the last certificate is not, add it to the list - rca := c.caRoot[string(verifiedChains[i][numberOfNoneRootCerts].RawIssuer)] - if rca == nil { - return fmt.Errorf("failed to find root CA. pkix.name: %v", verifiedChains[i][numberOfNoneRootCerts].Issuer) - } - verifiedChains[i] = append(verifiedChains[i], rca) - numberOfNoneRootCerts++ - } - results, err := c.GetAllRevocationStatus(ctx, verifiedChains[i], conf) - if err != nil { - return err - } - if r := c.canEarlyExitForOCSP(results, numberOfNoneRootCerts, conf); r != nil { - return r.err - } - } - - return nil -} - -func (c *Client) canEarlyExitForOCSP(results []*ocspStatus, chainSize int, conf *VerifyConfig) *ocspStatus { - msg := "" - if conf.OcspFailureMode == FailOpenFalse { - // Fail closed. any error is returned to stop connection - for _, r := range results { - if r.err != nil { - return r - } - } - } else { - // Fail open and all results are valid. 
- allValid := len(results) == chainSize - for _, r := range results { - if !isValidOCSPStatus(r.code) { - allValid = false - break - } - } - for _, r := range results { - if allValid && r.code == ocspStatusRevoked { - return r - } - if r != nil && r.code != ocspStatusGood && r.err != nil { - msg += "" + r.err.Error() - } - } - } - if len(msg) > 0 { - c.Logger().Warn( - "OCSP is set to fail-open, and could not retrieve OCSP based revocation checking but proceeding.", "detail", msg) - } - return nil -} - -func (c *Client) validateWithCacheForAllCertificates(verifiedChains []*x509.Certificate) (bool, error) { - n := len(verifiedChains) - 1 - for j := 0; j < n; j++ { - subject := verifiedChains[j] - issuer := verifiedChains[j+1] - status, _, _, err := c.validateWithCache(subject, issuer) - if err != nil { - return false, err - } - if !isValidOCSPStatus(status.code) { - return false, nil - } - } - return true, nil -} - -func (c *Client) validateWithCache(subject, issuer *x509.Certificate) (*ocspStatus, []byte, *certIDKey, error) { - ocspReq, err := ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{}) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to create OCSP request from the certificates: %v", err) - } - encodedCertID, ocspS := extractCertIDKeyFromRequest(ocspReq) - if ocspS.code != ocspSuccess { - return nil, nil, nil, fmt.Errorf("failed to extract CertID from OCSP Request: %v", err) - } - status, err := c.checkOCSPResponseCache(encodedCertID, subject, issuer) - if err != nil { - return nil, nil, nil, err - } - return status, ocspReq, encodedCertID, nil -} - -func (c *Client) GetAllRevocationStatus(ctx context.Context, verifiedChains []*x509.Certificate, conf *VerifyConfig) ([]*ocspStatus, error) { - _, err := c.validateWithCacheForAllCertificates(verifiedChains) - if err != nil { - return nil, err - } - n := len(verifiedChains) - 1 - results := make([]*ocspStatus, n) - for j := 0; j < n; j++ { - results[j], err = c.GetRevocationStatus(ctx, 
verifiedChains[j], verifiedChains[j+1], conf) - if err != nil { - return nil, err - } - if !isValidOCSPStatus(results[j].code) { - return results, nil - } - } - return results, nil -} - -// verifyPeerCertificateSerial verifies the certificate revocation status in serial. -func (c *Client) verifyPeerCertificateSerial(conf *VerifyConfig) func(_ [][]byte, verifiedChains [][]*x509.Certificate) (err error) { - return func(_ [][]byte, verifiedChains [][]*x509.Certificate) error { - return c.VerifyPeerCertificate(context.TODO(), verifiedChains, conf) - } -} - -func (c *Client) extractOCSPCacheResponseValueWithoutSubject(cacheValue ocspCachedResponse) (*ocspStatus, error) { - return c.extractOCSPCacheResponseValue(&cacheValue, nil, nil) -} - -func (c *Client) extractOCSPCacheResponseValue(cacheValue *ocspCachedResponse, subject, issuer *x509.Certificate) (*ocspStatus, error) { - subjectName := "Unknown" - if subject != nil { - subjectName = subject.Subject.CommonName - } - - curTime := time.Now() - if cacheValue == nil { - return &ocspStatus{ - code: ocspMissedCache, - err: fmt.Errorf("miss cache data. subject: %v", subjectName), - }, nil - } - currentTime := float64(curTime.UTC().Unix()) - if currentTime-cacheValue.time >= cacheExpire { - return &ocspStatus{ - code: ocspCacheExpired, - err: fmt.Errorf("cache expired. 
current: %v, cache: %v", - time.Unix(int64(currentTime), 0).UTC(), time.Unix(int64(cacheValue.time), 0).UTC()), - }, nil - } - - return validateOCSP(&ocsp.Response{ - ProducedAt: time.Unix(int64(cacheValue.producedAt), 0).UTC(), - ThisUpdate: time.Unix(int64(cacheValue.thisUpdate), 0).UTC(), - NextUpdate: time.Unix(int64(cacheValue.nextUpdate), 0).UTC(), - Status: int(cacheValue.status), - }) -} - -/* -// writeOCSPCache writes a OCSP Response cache -func (c *Client) writeOCSPCache(ctx context.Context, storage logical.Storage) error { - c.Logger().Debug("writing OCSP Response cache") - t := time.Now() - m := make(map[string][]interface{}) - keys := c.ocspResponseCache.Keys() - if len(keys) > persistedCacheSize { - keys = keys[:persistedCacheSize] - } - for _, k := range keys { - e, ok := c.ocspResponseCache.Get(k) - if ok { - entry := e.(*ocspCachedResponse) - // Don't store if expired - if isInValidityRange(t, time.Unix(int64(entry.thisUpdate), 0), time.Unix(int64(entry.nextUpdate), 0)) { - key := k.(certIDKey) - cacheKeyInBase64, err := decodeCertIDKey(&key) - if err != nil { - return err - } - m[cacheKeyInBase64] = []interface{}{entry.status, entry.time, entry.producedAt, entry.thisUpdate, entry.nextUpdate} - } - } - } - - v, err := jsonutil.EncodeJSONAndCompress(m, nil) - if err != nil { - return err - } - entry := logical.StorageEntry{ - Key: ocspCacheKey, - Value: v, - } - return storage.Put(ctx, &entry) -} - -// readOCSPCache reads a OCSP Response cache from storage -func (c *Client) readOCSPCache(ctx context.Context, storage logical.Storage) error { - c.Logger().Debug("reading OCSP Response cache") - - entry, err := storage.Get(ctx, ocspCacheKey) - if err != nil { - return err - } - if entry == nil { - return nil - } - var untypedCache map[string][]interface{} - - err = jsonutil.DecodeJSON(entry.Value, &untypedCache) - if err != nil { - return errors.New("failed to unmarshal OCSP cache") - } - - for k, v := range untypedCache { - key, err := 
c.encodeCertIDKey(k) - if err != nil { - return err - } - var times [4]float64 - for i, t := range v[1:] { - if jn, ok := t.(json.Number); ok { - times[i], err = jn.Float64() - if err != nil { - return err - } - } else { - times[i] = t.(float64) - } - } - var status int - if jn, ok := v[0].(json.Number); ok { - s, err := jn.Int64() - if err != nil { - return err - } - status = int(s) - } else { - status = v[0].(int) - } - - c.ocspResponseCache.Add(*key, &ocspCachedResponse{ - status: ocspStatusCode(status), - time: times[0], - producedAt: times[1], - thisUpdate: times[2], - nextUpdate: times[3], - }) - } - - return nil -} -*/ - -func New(logFactory func() hclog.Logger, cacheSize int) *Client { - if cacheSize < 100 { - cacheSize = 100 - } - cache, _ := lru.New2Q(cacheSize) - c := Client{ - caRoot: make(map[string]*x509.Certificate), - ocspResponseCache: cache, - logFactory: logFactory, - } - - return &c -} - -func (c *Client) Logger() hclog.Logger { - return c.logFactory() -} - -// insecureOcspTransport is the transport object that doesn't do certificate revocation check. -func newInsecureOcspTransport(extraCas []*x509.Certificate) *http.Transport { - // Get the SystemCertPool, continue with an empty pool on error - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } - for _, c := range extraCas { - rootCAs.AddCert(c) - } - config := &tls.Config{ - RootCAs: rootCAs, - } - return &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Minute, - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - TLSClientConfig: config, - } -} - -// NewTransport includes the certificate revocation check with OCSP in sequential. 
-func (c *Client) NewTransport(conf *VerifyConfig) *http.Transport { - rootCAs := c.certPool - if rootCAs == nil { - rootCAs, _ = x509.SystemCertPool() - } - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } - for _, c := range conf.ExtraCas { - rootCAs.AddCert(c) - } - return &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: rootCAs, - VerifyPeerCertificate: c.verifyPeerCertificateSerial(conf), - }, - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Minute, - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - } -} - -/* -func (c *Client) WriteCache(ctx context.Context, storage logical.Storage) error { - c.ocspResponseCacheLock.Lock() - defer c.ocspResponseCacheLock.Unlock() - if c.cacheUpdated { - err := c.writeOCSPCache(ctx, storage) - if err == nil { - c.cacheUpdated = false - } - return err - } - return nil -} - -func (c *Client) ReadCache(ctx context.Context, storage logical.Storage) error { - c.ocspResponseCacheLock.Lock() - defer c.ocspResponseCacheLock.Unlock() - return c.readOCSPCache(ctx, storage) -} -*/ -/* - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright (c) 2017-2022 Snowflake Computing Inc. All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ diff --git a/sdk/helper/ocsp/ocsp_test.go b/sdk/helper/ocsp/ocsp_test.go deleted file mode 100644 index 2f3f1976d2a80..0000000000000 --- a/sdk/helper/ocsp/ocsp_test.go +++ /dev/null @@ -1,530 +0,0 @@ -// Copyright (c) 2017-2022 Snowflake Computing Inc. All rights reserved. - -package ocsp - -import ( - "bytes" - "context" - "crypto" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "testing" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-retryablehttp" - lru "github.com/hashicorp/golang-lru" - "golang.org/x/crypto/ocsp" -) - -func TestOCSP(t *testing.T) { - targetURL := []string{ - "https://sfcdev1.blob.core.windows.net/", - "https://sfctest0.snowflakecomputing.com/", - "https://s3-us-west-2.amazonaws.com/sfc-snowsql-updates/?prefix=1.1/windows_x86_64", - } - - conf := VerifyConfig{ - OcspFailureMode: FailOpenFalse, - } - c := New(testLogFactory, 10) - transports := []*http.Transport{ - newInsecureOcspTransport(nil), - c.NewTransport(&conf), - } - - for _, tgt := range targetURL { - c.ocspResponseCache, _ = lru.New2Q(10) - for _, tr := range transports { - c := &http.Client{ - Transport: tr, - Timeout: 30 * time.Second, - } - req, err := http.NewRequest("GET", tgt, bytes.NewReader(nil)) - if err != nil { - t.Fatalf("fail to create a request. 
err: %v", err) - } - res, err := c.Do(req) - if err != nil { - t.Fatalf("failed to GET contents. err: %v", err) - } - defer res.Body.Close() - _, err = ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("failed to read content body for %v", tgt) - } - - } - } -} - -/** -// Used for development, requires an active Vault with PKI setup -func TestMultiOCSP(t *testing.T) { - - targetURL := []string{ - "https://localhost:8200/v1/pki/ocsp", - "https://localhost:8200/v1/pki/ocsp", - "https://localhost:8200/v1/pki/ocsp", - } - - b, _ := pem.Decode([]byte(vaultCert)) - caCert, _ := x509.ParseCertificate(b.Bytes) - conf := VerifyConfig{ - OcspFailureMode: FailOpenFalse, - QueryAllServers: true, - OcspServersOverride: targetURL, - ExtraCas: []*x509.Certificate{caCert}, - } - c := New(testLogFactory, 10) - transports := []*http.Transport{ - newInsecureOcspTransport(conf.ExtraCas), - c.NewTransport(&conf), - } - - tgt := "https://localhost:8200/v1/pki/ca/pem" - c.ocspResponseCache, _ = lru.New2Q(10) - for _, tr := range transports { - c := &http.Client{ - Transport: tr, - Timeout: 30 * time.Second, - } - req, err := http.NewRequest("GET", tgt, bytes.NewReader(nil)) - if err != nil { - t.Fatalf("fail to create a request. err: %v", err) - } - res, err := c.Do(req) - if err != nil { - t.Fatalf("failed to GET contents. err: %v", err) - } - defer res.Body.Close() - _, err = ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("failed to read content body for %v", tgt) - } - } -} -*/ - -func TestUnitEncodeCertIDGood(t *testing.T) { - targetURLs := []string{ - "faketestaccount.snowflakecomputing.com:443", - "s3-us-west-2.amazonaws.com:443", - "sfcdev1.blob.core.windows.net:443", - } - for _, tt := range targetURLs { - chainedCerts := getCert(tt) - for i := 0; i < len(chainedCerts)-1; i++ { - subject := chainedCerts[i] - issuer := chainedCerts[i+1] - ocspServers := subject.OCSPServer - if len(ocspServers) == 0 { - t.Fatalf("no OCSP server is found. 
cert: %v", subject.Subject) - } - ocspReq, err := ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{}) - if err != nil { - t.Fatalf("failed to create OCSP request. err: %v", err) - } - var ost *ocspStatus - _, ost = extractCertIDKeyFromRequest(ocspReq) - if ost.err != nil { - t.Fatalf("failed to extract cert ID from the OCSP request. err: %v", ost.err) - } - // better hash. Not sure if the actual OCSP server accepts this, though. - ocspReq, err = ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{Hash: crypto.SHA512}) - if err != nil { - t.Fatalf("failed to create OCSP request. err: %v", err) - } - _, ost = extractCertIDKeyFromRequest(ocspReq) - if ost.err != nil { - t.Fatalf("failed to extract cert ID from the OCSP request. err: %v", ost.err) - } - // tweaked request binary - ocspReq, err = ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{Hash: crypto.SHA512}) - if err != nil { - t.Fatalf("failed to create OCSP request. err: %v", err) - } - ocspReq[10] = 0 // random change - _, ost = extractCertIDKeyFromRequest(ocspReq) - if ost.err == nil { - t.Fatal("should have failed") - } - } - } -} - -func TestUnitCheckOCSPResponseCache(t *testing.T) { - c := New(testLogFactory, 10) - dummyKey0 := certIDKey{ - NameHash: "dummy0", - IssuerKeyHash: "dummy0", - SerialNumber: "dummy0", - } - dummyKey := certIDKey{ - NameHash: "dummy1", - IssuerKeyHash: "dummy1", - SerialNumber: "dummy1", - } - currentTime := float64(time.Now().UTC().Unix()) - c.ocspResponseCache.Add(dummyKey0, &ocspCachedResponse{time: currentTime}) - subject := &x509.Certificate{} - issuer := &x509.Certificate{} - ost, err := c.checkOCSPResponseCache(&dummyKey, subject, issuer) - if err != nil { - t.Fatal(err) - } - if ost.code != ocspMissedCache { - t.Fatalf("should have failed. 
expected: %v, got: %v", ocspMissedCache, ost.code) - } - // old timestamp - c.ocspResponseCache.Add(dummyKey, &ocspCachedResponse{time: float64(1395054952)}) - ost, err = c.checkOCSPResponseCache(&dummyKey, subject, issuer) - if err != nil { - t.Fatal(err) - } - if ost.code != ocspCacheExpired { - t.Fatalf("should have failed. expected: %v, got: %v", ocspCacheExpired, ost.code) - } - - // invalid validity - c.ocspResponseCache.Add(dummyKey, &ocspCachedResponse{time: float64(currentTime - 1000)}) - ost, err = c.checkOCSPResponseCache(&dummyKey, subject, nil) - if err == nil && isValidOCSPStatus(ost.code) { - t.Fatalf("should have failed.") - } -} - -func TestUnitValidateOCSP(t *testing.T) { - ocspRes := &ocsp.Response{} - ost, err := validateOCSP(ocspRes) - if err == nil && isValidOCSPStatus(ost.code) { - t.Fatalf("should have failed.") - } - - currentTime := time.Now() - ocspRes.ThisUpdate = currentTime.Add(-2 * time.Hour) - ocspRes.NextUpdate = currentTime.Add(2 * time.Hour) - ocspRes.Status = ocsp.Revoked - ost, err = validateOCSP(ocspRes) - if err != nil { - t.Fatal(err) - } - - if ost.code != ocspStatusRevoked { - t.Fatalf("should have failed. expected: %v, got: %v", ocspStatusRevoked, ost.code) - } - ocspRes.Status = ocsp.Good - ost, err = validateOCSP(ocspRes) - if err != nil { - t.Fatal(err) - } - - if ost.code != ocspStatusGood { - t.Fatalf("should have success. expected: %v, got: %v", ocspStatusGood, ost.code) - } - ocspRes.Status = ocsp.Unknown - ost, err = validateOCSP(ocspRes) - if err != nil { - t.Fatal(err) - } - if ost.code != ocspStatusUnknown { - t.Fatalf("should have failed. expected: %v, got: %v", ocspStatusUnknown, ost.code) - } - ocspRes.Status = ocsp.ServerFailed - ost, err = validateOCSP(ocspRes) - if err != nil { - t.Fatal(err) - } - if ost.code != ocspStatusOthers { - t.Fatalf("should have failed. 
expected: %v, got: %v", ocspStatusOthers, ost.code) - } -} - -func TestUnitEncodeCertID(t *testing.T) { - var st *ocspStatus - _, st = extractCertIDKeyFromRequest([]byte{0x1, 0x2}) - if st.code != ocspFailedDecomposeRequest { - t.Fatalf("failed to get OCSP status. expected: %v, got: %v", ocspFailedDecomposeRequest, st.code) - } -} - -func getCert(addr string) []*x509.Certificate { - tcpConn, err := net.DialTimeout("tcp", addr, 40*time.Second) - if err != nil { - panic(err) - } - defer tcpConn.Close() - - err = tcpConn.SetDeadline(time.Now().Add(10 * time.Second)) - if err != nil { - panic(err) - } - config := tls.Config{InsecureSkipVerify: true, ServerName: addr} - - conn := tls.Client(tcpConn, &config) - defer conn.Close() - - err = conn.Handshake() - if err != nil { - panic(err) - } - - state := conn.ConnectionState() - - return state.PeerCertificates -} - -func TestOCSPRetry(t *testing.T) { - c := New(testLogFactory, 10) - certs := getCert("s3-us-west-2.amazonaws.com:443") - dummyOCSPHost := &url.URL{ - Scheme: "https", - Host: "dummyOCSPHost", - } - client := &fakeHTTPClient{ - cnt: 3, - success: true, - body: []byte{1, 2, 3}, - logger: hclog.New(hclog.DefaultOptions), - t: t, - } - res, b, st, err := c.retryOCSP( - context.TODO(), - client, fakeRequestFunc, - dummyOCSPHost, - make(map[string]string), []byte{0}, certs[len(certs)-1]) - if err == nil { - fmt.Printf("should fail: %v, %v, %v\n", res, b, st) - } - client = &fakeHTTPClient{ - cnt: 30, - success: true, - body: []byte{1, 2, 3}, - logger: hclog.New(hclog.DefaultOptions), - t: t, - } - res, b, st, err = c.retryOCSP( - context.TODO(), - client, fakeRequestFunc, - dummyOCSPHost, - make(map[string]string), []byte{0}, certs[len(certs)-1]) - if err == nil { - fmt.Printf("should fail: %v, %v, %v\n", res, b, st) - } -} - -type tcCanEarlyExit struct { - results []*ocspStatus - resultLen int - retFailOpen *ocspStatus - retFailClosed *ocspStatus -} - -func TestCanEarlyExitForOCSP(t *testing.T) { - testcases := 
[]tcCanEarlyExit{ - { // 0 - results: []*ocspStatus{ - { - code: ocspStatusGood, - }, - { - code: ocspStatusGood, - }, - { - code: ocspStatusGood, - }, - }, - retFailOpen: nil, - retFailClosed: nil, - }, - { // 1 - results: []*ocspStatus{ - { - code: ocspStatusRevoked, - err: errors.New("revoked"), - }, - { - code: ocspStatusGood, - }, - { - code: ocspStatusGood, - }, - }, - retFailOpen: &ocspStatus{ocspStatusRevoked, errors.New("revoked")}, - retFailClosed: &ocspStatus{ocspStatusRevoked, errors.New("revoked")}, - }, - { // 2 - results: []*ocspStatus{ - { - code: ocspStatusUnknown, - err: errors.New("unknown"), - }, - { - code: ocspStatusGood, - }, - { - code: ocspStatusGood, - }, - }, - retFailOpen: nil, - retFailClosed: &ocspStatus{ocspStatusUnknown, errors.New("unknown")}, - }, - { // 3: not taken as revoked if any invalid OCSP response (ocspInvalidValidity) is included. - results: []*ocspStatus{ - { - code: ocspStatusRevoked, - err: errors.New("revoked"), - }, - { - code: ocspInvalidValidity, - }, - { - code: ocspStatusGood, - }, - }, - retFailOpen: nil, - retFailClosed: &ocspStatus{ocspStatusRevoked, errors.New("revoked")}, - }, - { // 4: not taken as revoked if the number of results don't match the expected results. - results: []*ocspStatus{ - { - code: ocspStatusRevoked, - err: errors.New("revoked"), - }, - { - code: ocspStatusGood, - }, - }, - resultLen: 3, - retFailOpen: nil, - retFailClosed: &ocspStatus{ocspStatusRevoked, errors.New("revoked")}, - }, - } - c := New(testLogFactory, 10) - for idx, tt := range testcases { - expectedLen := len(tt.results) - if tt.resultLen > 0 { - expectedLen = tt.resultLen - } - r := c.canEarlyExitForOCSP(tt.results, expectedLen, &VerifyConfig{OcspFailureMode: FailOpenTrue}) - if !(tt.retFailOpen == nil && r == nil) && !(tt.retFailOpen != nil && r != nil && tt.retFailOpen.code == r.code) { - t.Fatalf("%d: failed to match return. 
expected: %v, got: %v", idx, tt.retFailOpen, r) - } - r = c.canEarlyExitForOCSP(tt.results, expectedLen, &VerifyConfig{OcspFailureMode: FailOpenFalse}) - if !(tt.retFailClosed == nil && r == nil) && !(tt.retFailClosed != nil && r != nil && tt.retFailClosed.code == r.code) { - t.Fatalf("%d: failed to match return. expected: %v, got: %v", idx, tt.retFailClosed, r) - } - } -} - -var testLogger = hclog.New(hclog.DefaultOptions) - -func testLogFactory() hclog.Logger { - return testLogger -} - -type fakeHTTPClient struct { - cnt int // number of retry - success bool // return success after retry in cnt times - timeout bool // timeout - body []byte // return body - t *testing.T - logger hclog.Logger - redirected bool -} - -func (c *fakeHTTPClient) Do(_ *retryablehttp.Request) (*http.Response, error) { - c.cnt-- - if c.cnt < 0 { - c.cnt = 0 - } - c.t.Log("fakeHTTPClient.cnt", c.cnt) - - var retcode int - if !c.redirected { - c.redirected = true - c.cnt++ - retcode = 405 - } else if c.success && c.cnt == 1 { - retcode = 200 - } else { - if c.timeout { - // simulate timeout - time.Sleep(time.Second * 1) - return nil, &fakeHTTPError{ - err: "Whatever reason (Client.Timeout exceeded while awaiting headers)", - timeout: true, - } - } - retcode = 0 - } - - ret := &http.Response{ - StatusCode: retcode, - Body: &fakeResponseBody{body: c.body}, - } - return ret, nil -} - -type fakeHTTPError struct { - err string - timeout bool -} - -func (e *fakeHTTPError) Error() string { return e.err } -func (e *fakeHTTPError) Timeout() bool { return e.timeout } -func (e *fakeHTTPError) Temporary() bool { return true } - -type fakeResponseBody struct { - body []byte - cnt int -} - -func (b *fakeResponseBody) Read(p []byte) (n int, err error) { - if b.cnt == 0 { - copy(p, b.body) - b.cnt = 1 - return len(b.body), nil - } - b.cnt = 0 - return 0, io.EOF -} - -func (b *fakeResponseBody) Close() error { - return nil -} - -func fakeRequestFunc(_, _ string, _ interface{}) (*retryablehttp.Request, error) 
{ - return nil, nil -} - -const vaultCert = `-----BEGIN CERTIFICATE----- -MIIDuTCCAqGgAwIBAgIUA6VeVD1IB5rXcCZRAqPO4zr/GAMwDQYJKoZIhvcNAQEL -BQAwcjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 -eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRowGAYD -VQQDDBF3d3cuY29uaHVnZWNvLmNvbTAeFw0yMjA5MDcxOTA1MzdaFw0yNDA5MDYx -OTA1MzdaMHIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTERMA8GA1UEBwwIU29t -ZUNpdHkxEjAQBgNVBAoMCU15Q29tcGFueTETMBEGA1UECwwKTXlEaXZpc2lvbjEa -MBgGA1UEAwwRd3d3LmNvbmh1Z2Vjby5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB -DwAwggEKAoIBAQDL9qzEXi4PIafSAqfcwcmjujFvbG1QZbI8swxnD+w8i4ufAQU5 -LDmvMrGo3ZbhJ0mCihYmFxpjhRdP2raJQ9TysHlPXHtDRpr9ckWTKBz2oIfqVtJ2 -qzteQkWCkDAO7kPqzgCFsMeoMZeONRkeGib0lEzQAbW/Rqnphg8zVVkyQ71DZ7Pc -d5WkC2E28kKcSramhWfVFpxG3hSIrLOX2esEXteLRzKxFPf+gi413JZFKYIWrebP -u5t0++MLNpuX322geoki4BWMjQsd47XILmxZ4aj33ScZvdrZESCnwP76hKIxg9mO -lMxrqSWKVV5jHZrElSEj9LYJgDO1Y6eItn7hAgMBAAGjRzBFMAsGA1UdDwQEAwIE -MDATBgNVHSUEDDAKBggrBgEFBQcDATAhBgNVHREEGjAYggtleGFtcGxlLmNvbYIJ -bG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQA5dPdf5SdtMwe2uSspO/EuWqbM -497vMQBW1Ey8KRKasJjhvOVYMbe7De5YsnW4bn8u5pl0zQGF4hEtpmifAtVvziH/ -K+ritQj9VVNbLLCbFcg+b0kfjt4yrDZ64vWvIeCgPjG1Kme8gdUUWgu9dOud5gdx -qg/tIFv4TRS/eIIymMlfd9owOD3Ig6S5fy4NaAJFAwXf8+3Rzuc+e7JSAPgAufjh -tOTWinxvoiOLuYwo9CyGgq4qKBFsrY0aE0gdA7oTQkpbEbo2EbqiWUl/PTCl1Y4Z -nSZ0n+4q9QC9RLrWwYTwh838d5RVLUst2mBKSA+vn7YkqmBJbdBC6nkd7n7H ------END CERTIFICATE----- -` diff --git a/sdk/helper/parseutil/parseutil.go b/sdk/helper/parseutil/parseutil.go index 5bea8909de142..eda539424f2d1 100644 --- a/sdk/helper/parseutil/parseutil.go +++ b/sdk/helper/parseutil/parseutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package parseutil diff --git a/sdk/helper/password/password.go b/sdk/helper/password/password.go index 931a72cc8bc33..84e6b594d55d2 100644 --- a/sdk/helper/password/password.go +++ b/sdk/helper/password/password.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package password diff --git a/sdk/helper/pathmanager/pathmanager.go b/sdk/helper/pathmanager/pathmanager.go index 0d2d60070f789..e0e39445b2a56 100644 --- a/sdk/helper/pathmanager/pathmanager.go +++ b/sdk/helper/pathmanager/pathmanager.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pathmanager import ( diff --git a/sdk/helper/pathmanager/pathmanager_test.go b/sdk/helper/pathmanager/pathmanager_test.go index 515d830324f1e..7d6207b625e60 100644 --- a/sdk/helper/pathmanager/pathmanager_test.go +++ b/sdk/helper/pathmanager/pathmanager_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pathmanager import ( diff --git a/sdk/helper/pluginutil/env.go b/sdk/helper/pluginutil/env.go index 1b45ef32dca22..df1fdbeede939 100644 --- a/sdk/helper/pluginutil/env.go +++ b/sdk/helper/pluginutil/env.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pluginutil import ( diff --git a/sdk/helper/pluginutil/env_test.go b/sdk/helper/pluginutil/env_test.go index 21f77faba6e63..1d04b327524e5 100644 --- a/sdk/helper/pluginutil/env_test.go +++ b/sdk/helper/pluginutil/env_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pluginutil import ( diff --git a/sdk/helper/pluginutil/multiplexing.go b/sdk/helper/pluginutil/multiplexing.go index 8fc86a4c36c96..41316ec49df28 100644 --- a/sdk/helper/pluginutil/multiplexing.go +++ b/sdk/helper/pluginutil/multiplexing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pluginutil import ( diff --git a/sdk/helper/pluginutil/multiplexing.pb.go b/sdk/helper/pluginutil/multiplexing.pb.go index d7663b90215f5..d7073b10e0a5b 100644 --- a/sdk/helper/pluginutil/multiplexing.pb.go +++ b/sdk/helper/pluginutil/multiplexing.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: sdk/helper/pluginutil/multiplexing.proto package pluginutil diff --git a/sdk/helper/pluginutil/multiplexing.proto b/sdk/helper/pluginutil/multiplexing.proto index c1a2ca0a4aa49..aa2438b070ffb 100644 --- a/sdk/helper/pluginutil/multiplexing.proto +++ b/sdk/helper/pluginutil/multiplexing.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; package pluginutil.multiplexing; diff --git a/sdk/helper/pluginutil/multiplexing_test.go b/sdk/helper/pluginutil/multiplexing_test.go index 3f589ffa7cd9e..125a4a120c624 100644 --- a/sdk/helper/pluginutil/multiplexing_test.go +++ b/sdk/helper/pluginutil/multiplexing_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pluginutil import ( diff --git a/sdk/helper/pluginutil/run_config.go b/sdk/helper/pluginutil/run_config.go index be34fa9dc09b7..3eb8fb2b28173 100644 --- a/sdk/helper/pluginutil/run_config.go +++ b/sdk/helper/pluginutil/run_config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pluginutil import ( @@ -13,6 +10,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/version" ) type PluginClientConfig struct { @@ -48,11 +46,7 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error if rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) } - version, err := rc.Wrapper.VaultVersion(ctx) - if err != nil { - return nil, err - } - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version)) + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version)) if rc.IsMetadataMode { rc.Logger = rc.Logger.With("metadata", "true") diff --git a/sdk/helper/pluginutil/run_config_test.go b/sdk/helper/pluginutil/run_config_test.go index e64057783ad0d..3c2fef2196c14 100644 --- a/sdk/helper/pluginutil/run_config_test.go +++ b/sdk/helper/pluginutil/run_config_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pluginutil import ( @@ -10,6 +7,8 @@ import ( "testing" "time" + "github.com/hashicorp/vault/sdk/version" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/wrapping" @@ -77,7 +76,7 @@ func TestMakeConfig(t *testing.T) { []string{"foo", "bar"}, []string{ "initial=true", - fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), }, @@ -143,7 +142,7 @@ func TestMakeConfig(t *testing.T) { []string{ "initial=true", fmt.Sprintf("%s=%t", PluginMlockEnabled, true), - fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, "testtoken"), @@ -206,7 +205,7 @@ func TestMakeConfig(t *testing.T) { []string{"foo", "bar"}, []string{ "initial=true", - fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), }, @@ -268,7 +267,7 @@ func TestMakeConfig(t *testing.T) { []string{"foo", "bar"}, []string{ "initial=true", - fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), }, @@ -340,10 +339,6 @@ type mockRunnerUtil struct { mock.Mock } -func (m *mockRunnerUtil) VaultVersion(ctx context.Context) (string, error) { - return "dummyversion", nil -} - func (m *mockRunnerUtil) NewPluginClient(ctx context.Context, config PluginClientConfig) 
(PluginClient, error) { args := m.Called(ctx, config) return args.Get(0).(PluginClient), args.Error(1) diff --git a/sdk/helper/pluginutil/runner.go b/sdk/helper/pluginutil/runner.go index 977f95d72208a..631c4f3a2f35d 100644 --- a/sdk/helper/pluginutil/runner.go +++ b/sdk/helper/pluginutil/runner.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pluginutil import ( @@ -30,7 +27,6 @@ type RunnerUtil interface { NewPluginClient(ctx context.Context, config PluginClientConfig) (PluginClient, error) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) MlockEnabled() bool - VaultVersion(ctx context.Context) (string, error) } // LookRunnerUtil defines the functions for both Looker and Wrapper diff --git a/sdk/helper/pluginutil/tls.go b/sdk/helper/pluginutil/tls.go index 21b35d910e798..c5fff6d701ed7 100644 --- a/sdk/helper/pluginutil/tls.go +++ b/sdk/helper/pluginutil/tls.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pluginutil import ( diff --git a/sdk/helper/pointerutil/pointer.go b/sdk/helper/pointerutil/pointer.go index b4bfe114cfdf9..0f26e7dad660a 100644 --- a/sdk/helper/pointerutil/pointer.go +++ b/sdk/helper/pointerutil/pointer.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pointerutil import ( diff --git a/sdk/helper/policyutil/policyutil.go b/sdk/helper/policyutil/policyutil.go index a5a8082e13c22..8e5541b1868fc 100644 --- a/sdk/helper/policyutil/policyutil.go +++ b/sdk/helper/policyutil/policyutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package policyutil import ( diff --git a/sdk/helper/policyutil/policyutil_test.go b/sdk/helper/policyutil/policyutil_test.go index 2280ba93eed87..4b26483f716af 100644 --- a/sdk/helper/policyutil/policyutil_test.go +++ b/sdk/helper/policyutil/policyutil_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package policyutil import "testing" diff --git a/sdk/helper/roottoken/decode.go b/sdk/helper/roottoken/decode.go index 9939b67f72f44..cc9300690a4ae 100644 --- a/sdk/helper/roottoken/decode.go +++ b/sdk/helper/roottoken/decode.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package roottoken import ( diff --git a/sdk/helper/roottoken/encode.go b/sdk/helper/roottoken/encode.go index dbbc90a2afa30..2537d93979064 100644 --- a/sdk/helper/roottoken/encode.go +++ b/sdk/helper/roottoken/encode.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package roottoken import ( diff --git a/sdk/helper/roottoken/encode_test.go b/sdk/helper/roottoken/encode_test.go index 269bf65b04726..9df26928e2942 100644 --- a/sdk/helper/roottoken/encode_test.go +++ b/sdk/helper/roottoken/encode_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package roottoken import ( diff --git a/sdk/helper/roottoken/otp.go b/sdk/helper/roottoken/otp.go index 4445ec52dc653..5a12c4f0ae869 100644 --- a/sdk/helper/roottoken/otp.go +++ b/sdk/helper/roottoken/otp.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package roottoken import ( diff --git a/sdk/helper/roottoken/otp_test.go b/sdk/helper/roottoken/otp_test.go index 53776ec21c8b5..437e8f3d0f22e 100644 --- a/sdk/helper/roottoken/otp_test.go +++ b/sdk/helper/roottoken/otp_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package roottoken import ( diff --git a/sdk/helper/salt/salt.go b/sdk/helper/salt/salt.go index 84cbd03556c27..50e0cad90a607 100644 --- a/sdk/helper/salt/salt.go +++ b/sdk/helper/salt/salt.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package salt import ( diff --git a/sdk/helper/salt/salt_test.go b/sdk/helper/salt/salt_test.go index 3aec9a27b4990..99fcb06bd053b 100644 --- a/sdk/helper/salt/salt_test.go +++ b/sdk/helper/salt/salt_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package salt import ( diff --git a/sdk/helper/strutil/strutil.go b/sdk/helper/strutil/strutil.go index a9e506942af59..09cc9425cb1db 100644 --- a/sdk/helper/strutil/strutil.go +++ b/sdk/helper/strutil/strutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package strutil diff --git a/sdk/helper/template/funcs.go b/sdk/helper/template/funcs.go index 6d68cab3a7e44..ee9927fe15cbb 100644 --- a/sdk/helper/template/funcs.go +++ b/sdk/helper/template/funcs.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package template import ( diff --git a/sdk/helper/template/funcs_test.go b/sdk/helper/template/funcs_test.go index 4965115960eeb..f682a96753f2f 100644 --- a/sdk/helper/template/funcs_test.go +++ b/sdk/helper/template/funcs_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package template import ( diff --git a/sdk/helper/template/template.go b/sdk/helper/template/template.go index dea65f3f5ed36..2918825b978ec 100644 --- a/sdk/helper/template/template.go +++ b/sdk/helper/template/template.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package template import ( diff --git a/sdk/helper/template/template_test.go b/sdk/helper/template/template_test.go index 2f66bf36fe035..715dd52519e81 100644 --- a/sdk/helper/template/template_test.go +++ b/sdk/helper/template/template_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package template import ( diff --git a/sdk/helper/testcluster/consts.go b/sdk/helper/testcluster/consts.go deleted file mode 100644 index e85ef196a2215..0000000000000 --- a/sdk/helper/testcluster/consts.go +++ /dev/null @@ -1,12 +0,0 @@ -package testcluster - -const ( - // EnvVaultLicenseCI is the name of an environment variable that contains - // a signed license string used for Vault Enterprise binary-based tests. - // The binary will be run with the env var VAULT_LICENSE set to this value. - EnvVaultLicenseCI = "VAULT_LICENSE_CI" - - // DefaultCAFile is the path to the CA file. This is a docker-specific - // constant. TODO: needs to be moved to a more relevant place - DefaultCAFile = "/vault/config/ca.pem" -) diff --git a/sdk/helper/testcluster/docker/cert.go b/sdk/helper/testcluster/docker/cert.go deleted file mode 100644 index 4704030cb52f0..0000000000000 --- a/sdk/helper/testcluster/docker/cert.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package docker - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "sync" - - "github.com/hashicorp/errwrap" -) - -// ReloadFunc are functions that are called when a reload is requested -type ReloadFunc func() error - -// CertificateGetter satisfies ReloadFunc and its GetCertificate method -// satisfies the tls.GetCertificate function signature. Currently it does not -// allow changing paths after the fact. 
-type CertificateGetter struct { - sync.RWMutex - - cert *tls.Certificate - - certFile string - keyFile string - passphrase string -} - -func NewCertificateGetter(certFile, keyFile, passphrase string) *CertificateGetter { - return &CertificateGetter{ - certFile: certFile, - keyFile: keyFile, - passphrase: passphrase, - } -} - -func (cg *CertificateGetter) Reload() error { - certPEMBlock, err := ioutil.ReadFile(cg.certFile) - if err != nil { - return err - } - keyPEMBlock, err := ioutil.ReadFile(cg.keyFile) - if err != nil { - return err - } - - // Check for encrypted pem block - keyBlock, _ := pem.Decode(keyPEMBlock) - if keyBlock == nil { - return errors.New("decoded PEM is blank") - } - - if x509.IsEncryptedPEMBlock(keyBlock) { - keyBlock.Bytes, err = x509.DecryptPEMBlock(keyBlock, []byte(cg.passphrase)) - if err != nil { - return errwrap.Wrapf("Decrypting PEM block failed {{err}}", err) - } - keyPEMBlock = pem.EncodeToMemory(keyBlock) - } - - cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return err - } - - cg.Lock() - defer cg.Unlock() - - cg.cert = &cert - - return nil -} - -func (cg *CertificateGetter) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - cg.RLock() - defer cg.RUnlock() - - if cg.cert == nil { - return nil, fmt.Errorf("nil certificate") - } - - return cg.cert, nil -} diff --git a/sdk/helper/testcluster/docker/environment.go b/sdk/helper/testcluster/docker/environment.go deleted file mode 100644 index 204bacbdbddce..0000000000000 --- a/sdk/helper/testcluster/docker/environment.go +++ /dev/null @@ -1,1062 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package docker - -import ( - "bufio" - "bytes" - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/hex" - "encoding/json" - "encoding/pem" - "fmt" - "io" - "io/ioutil" - "math/big" - mathrand "math/rand" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "sync" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/volume" - docker "github.com/docker/docker/client" - "github.com/hashicorp/go-cleanhttp" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/api" - dockhelper "github.com/hashicorp/vault/sdk/helper/docker" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/helper/testcluster" - uberAtomic "go.uber.org/atomic" - "golang.org/x/net/http2" -) - -var ( - _ testcluster.VaultCluster = &DockerCluster{} - _ testcluster.VaultClusterNode = &DockerClusterNode{} -) - -const MaxClusterNameLength = 52 - -// DockerCluster is used to managing the lifecycle of the test Vault cluster -type DockerCluster struct { - ClusterName string - - RaftStorage bool - ClusterNodes []*DockerClusterNode - - // Certificate fields - *testcluster.CA - RootCAs *x509.CertPool - - barrierKeys [][]byte - recoveryKeys [][]byte - tmpDir string - - // rootToken is the initial root token created when the Vault cluster is - // created. 
- rootToken string - DockerAPI *docker.Client - ID string - Logger log.Logger - builtTags map[string]struct{} -} - -func (dc *DockerCluster) NamedLogger(s string) log.Logger { - return dc.Logger.Named(s) -} - -func (dc *DockerCluster) ClusterID() string { - return dc.ID -} - -func (dc *DockerCluster) Nodes() []testcluster.VaultClusterNode { - ret := make([]testcluster.VaultClusterNode, len(dc.ClusterNodes)) - for i := range dc.ClusterNodes { - ret[i] = dc.ClusterNodes[i] - } - return ret -} - -func (dc *DockerCluster) GetBarrierKeys() [][]byte { - return dc.barrierKeys -} - -func testKeyCopy(key []byte) []byte { - result := make([]byte, len(key)) - copy(result, key) - return result -} - -func (dc *DockerCluster) GetRecoveryKeys() [][]byte { - ret := make([][]byte, len(dc.recoveryKeys)) - for i, k := range dc.recoveryKeys { - ret[i] = testKeyCopy(k) - } - return ret -} - -func (dc *DockerCluster) GetBarrierOrRecoveryKeys() [][]byte { - return dc.GetBarrierKeys() -} - -func (dc *DockerCluster) SetBarrierKeys(keys [][]byte) { - dc.barrierKeys = make([][]byte, len(keys)) - for i, k := range keys { - dc.barrierKeys[i] = testKeyCopy(k) - } -} - -func (dc *DockerCluster) SetRecoveryKeys(keys [][]byte) { - dc.recoveryKeys = make([][]byte, len(keys)) - for i, k := range keys { - dc.recoveryKeys[i] = testKeyCopy(k) - } -} - -func (dc *DockerCluster) GetCACertPEMFile() string { - return dc.CACertPEMFile -} - -func (dc *DockerCluster) Cleanup() { - dc.cleanup() -} - -func (dc *DockerCluster) cleanup() error { - var result *multierror.Error - for _, node := range dc.ClusterNodes { - if err := node.cleanup(); err != nil { - result = multierror.Append(result, err) - } - } - - return result.ErrorOrNil() -} - -// GetRootToken returns the root token of the cluster, if set -func (dc *DockerCluster) GetRootToken() string { - return dc.rootToken -} - -func (dc *DockerCluster) SetRootToken(s string) { - dc.Logger.Trace("cluster root token changed", "helpful_env", 
fmt.Sprintf("VAULT_TOKEN=%s VAULT_CACERT=/vault/config/ca.pem", s)) - dc.rootToken = s -} - -func (n *DockerClusterNode) Name() string { - return n.Cluster.ClusterName + "-" + n.NodeID -} - -func (dc *DockerCluster) setupNode0(ctx context.Context) error { - client := dc.ClusterNodes[0].client - - var resp *api.InitResponse - var err error - for ctx.Err() == nil { - resp, err = client.Sys().Init(&api.InitRequest{ - SecretShares: 3, - SecretThreshold: 3, - }) - if err == nil && resp != nil { - break - } - time.Sleep(500 * time.Millisecond) - } - if err != nil { - return err - } - if resp == nil { - return fmt.Errorf("nil response to init request") - } - - for _, k := range resp.Keys { - raw, err := hex.DecodeString(k) - if err != nil { - return err - } - dc.barrierKeys = append(dc.barrierKeys, raw) - } - - for _, k := range resp.RecoveryKeys { - raw, err := hex.DecodeString(k) - if err != nil { - return err - } - dc.recoveryKeys = append(dc.recoveryKeys, raw) - } - - dc.rootToken = resp.RootToken - client.SetToken(dc.rootToken) - dc.ClusterNodes[0].client = client - - err = testcluster.UnsealNode(ctx, dc, 0) - if err != nil { - return err - } - - err = ensureLeaderMatches(ctx, client, func(leader *api.LeaderResponse) error { - if !leader.IsSelf { - return fmt.Errorf("node %d leader=%v, expected=%v", 0, leader.IsSelf, true) - } - - return nil - }) - - status, err := client.Sys().SealStatusWithContext(ctx) - if err != nil { - return err - } - dc.ID = status.ClusterID - return err -} - -func (dc *DockerCluster) clusterReady(ctx context.Context) error { - for i, node := range dc.ClusterNodes { - expectLeader := i == 0 - err := ensureLeaderMatches(ctx, node.client, func(leader *api.LeaderResponse) error { - if expectLeader != leader.IsSelf { - return fmt.Errorf("node %d leader=%v, expected=%v", i, leader.IsSelf, expectLeader) - } - - return nil - }) - if err != nil { - return err - } - } - - return nil -} - -func (dc *DockerCluster) setupCA(opts *DockerClusterOptions) 
error { - var err error - var ca testcluster.CA - - if opts != nil && opts.CAKey != nil { - ca.CAKey = opts.CAKey - } else { - ca.CAKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return err - } - } - - var caBytes []byte - if opts != nil && len(opts.CACert) > 0 { - caBytes = opts.CACert - } else { - serialNumber := mathrand.New(mathrand.NewSource(time.Now().UnixNano())).Int63() - CACertTemplate := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "localhost", - }, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - SerialNumber: big.NewInt(serialNumber), - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(262980 * time.Hour), - BasicConstraintsValid: true, - IsCA: true, - } - caBytes, err = x509.CreateCertificate(rand.Reader, CACertTemplate, CACertTemplate, ca.CAKey.Public(), ca.CAKey) - if err != nil { - return err - } - } - CACert, err := x509.ParseCertificate(caBytes) - if err != nil { - return err - } - ca.CACert = CACert - ca.CACertBytes = caBytes - - CACertPEMBlock := &pem.Block{ - Type: "CERTIFICATE", - Bytes: caBytes, - } - ca.CACertPEM = pem.EncodeToMemory(CACertPEMBlock) - - ca.CACertPEMFile = filepath.Join(dc.tmpDir, "ca", "ca.pem") - err = os.WriteFile(ca.CACertPEMFile, ca.CACertPEM, 0o755) - if err != nil { - return err - } - - marshaledCAKey, err := x509.MarshalECPrivateKey(ca.CAKey) - if err != nil { - return err - } - CAKeyPEMBlock := &pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: marshaledCAKey, - } - ca.CAKeyPEM = pem.EncodeToMemory(CAKeyPEMBlock) - - dc.CA = &ca - - return nil -} - -func (n *DockerClusterNode) setupCert(ip string) error { - var err error - - n.ServerKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return err - } - - serialNumber := mathrand.New(mathrand.NewSource(time.Now().UnixNano())).Int63() - certTemplate := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: n.Name(), - }, - DNSNames: []string{"localhost", n.Name()}, - 
IPAddresses: []net.IP{net.IPv6loopback, net.ParseIP("127.0.0.1"), net.ParseIP(ip)}, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, - SerialNumber: big.NewInt(serialNumber), - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(262980 * time.Hour), - } - n.ServerCertBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, n.Cluster.CACert, n.ServerKey.Public(), n.Cluster.CAKey) - if err != nil { - return err - } - n.ServerCert, err = x509.ParseCertificate(n.ServerCertBytes) - if err != nil { - return err - } - n.ServerCertPEM = pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: n.ServerCertBytes, - }) - - marshaledKey, err := x509.MarshalECPrivateKey(n.ServerKey) - if err != nil { - return err - } - n.ServerKeyPEM = pem.EncodeToMemory(&pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: marshaledKey, - }) - - n.ServerCertPEMFile = filepath.Join(n.WorkDir, "cert.pem") - err = os.WriteFile(n.ServerCertPEMFile, n.ServerCertPEM, 0o755) - if err != nil { - return err - } - - n.ServerKeyPEMFile = filepath.Join(n.WorkDir, "key.pem") - err = os.WriteFile(n.ServerKeyPEMFile, n.ServerKeyPEM, 0o755) - if err != nil { - return err - } - - tlsCert, err := tls.X509KeyPair(n.ServerCertPEM, n.ServerKeyPEM) - if err != nil { - return err - } - - certGetter := NewCertificateGetter(n.ServerCertPEMFile, n.ServerKeyPEMFile, "") - if err := certGetter.Reload(); err != nil { - return err - } - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{tlsCert}, - RootCAs: n.Cluster.RootCAs, - ClientCAs: n.Cluster.RootCAs, - ClientAuth: tls.RequestClientCert, - NextProtos: []string{"h2", "http/1.1"}, - GetCertificate: certGetter.GetCertificate, - } - - n.tlsConfig = tlsConfig - - err = os.WriteFile(filepath.Join(n.WorkDir, "ca.pem"), n.Cluster.CACertPEM, 0o755) - if err != nil { - return err - } - return 
nil -} - -func NewTestDockerCluster(t *testing.T, opts *DockerClusterOptions) *DockerCluster { - if opts == nil { - opts = &DockerClusterOptions{} - } - if opts.ClusterName == "" { - opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") - } - if opts.Logger == nil { - opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name()) - } - if opts.NetworkName == "" { - opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME") - } - if opts.VaultLicense == "" { - opts.VaultLicense = os.Getenv(testcluster.EnvVaultLicenseCI) - } - - ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) - t.Cleanup(cancel) - - dc, err := NewDockerCluster(ctx, opts) - if err != nil { - t.Fatal(err) - } - dc.Logger.Trace("cluster started", "helpful_env", fmt.Sprintf("VAULT_TOKEN=%s VAULT_CACERT=/vault/config/ca.pem", dc.GetRootToken())) - return dc -} - -func NewDockerCluster(ctx context.Context, opts *DockerClusterOptions) (*DockerCluster, error) { - api, err := dockhelper.NewDockerAPI() - if err != nil { - return nil, err - } - - if opts == nil { - opts = &DockerClusterOptions{} - } - if opts.Logger == nil { - opts.Logger = log.NewNullLogger() - } - - dc := &DockerCluster{ - DockerAPI: api, - RaftStorage: true, - ClusterName: opts.ClusterName, - Logger: opts.Logger, - builtTags: map[string]struct{}{}, - CA: opts.CA, - } - - if err := dc.setupDockerCluster(ctx, opts); err != nil { - dc.Cleanup() - return nil, err - } - - return dc, nil -} - -// DockerClusterNode represents a single instance of Vault in a cluster -type DockerClusterNode struct { - NodeID string - HostPort string - client *api.Client - ServerCert *x509.Certificate - ServerCertBytes []byte - ServerCertPEM []byte - ServerCertPEMFile string - ServerKey *ecdsa.PrivateKey - ServerKeyPEM []byte - ServerKeyPEMFile string - tlsConfig *tls.Config - WorkDir string - Cluster *DockerCluster - Container *types.ContainerJSON - DockerAPI *docker.Client - runner *dockhelper.Runner - Logger log.Logger - 
cleanupContainer func() - RealAPIAddr string - ContainerNetworkName string - ContainerIPAddress string - ImageRepo string - ImageTag string - DataVolumeName string - cleanupVolume func() -} - -func (n *DockerClusterNode) TLSConfig() *tls.Config { - return n.tlsConfig.Clone() -} - -func (n *DockerClusterNode) APIClient() *api.Client { - // We clone to ensure that whenever this method is called, the caller gets - // back a pristine client, without e.g. any namespace or token changes that - // might pollute a shared client. We clone the config instead of the - // client because (1) Client.clone propagates the replicationStateStore and - // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and - // (3) if clone returns an error, it doesn't feel as appropriate to panic - // below. Who knows why clone might return an error? - cfg := n.client.CloneConfig() - client, err := api.NewClient(cfg) - if err != nil { - // It seems fine to panic here, since this should be the same input - // we provided to NewClient when we were setup, and we didn't panic then. - // Better not to completely ignore the error though, suppose there's a - // bug in CloneConfig? 
- panic(fmt.Sprintf("NewClient error on cloned config: %v", err)) - } - client.SetToken(n.Cluster.rootToken) - return client -} - -// NewAPIClient creates and configures a Vault API client to communicate with -// the running Vault Cluster for this DockerClusterNode -func (n *DockerClusterNode) apiConfig() (*api.Config, error) { - transport := cleanhttp.DefaultPooledTransport() - transport.TLSClientConfig = n.TLSConfig() - if err := http2.ConfigureTransport(transport); err != nil { - return nil, err - } - client := &http.Client{ - Transport: transport, - CheckRedirect: func(*http.Request, []*http.Request) error { - // This can of course be overridden per-test by using its own client - return fmt.Errorf("redirects not allowed in these tests") - }, - } - config := api.DefaultConfig() - if config.Error != nil { - return nil, config.Error - } - config.Address = fmt.Sprintf("https://%s", n.HostPort) - config.HttpClient = client - config.MaxRetries = 0 - return config, nil -} - -func (n *DockerClusterNode) newAPIClient() (*api.Client, error) { - config, err := n.apiConfig() - if err != nil { - return nil, err - } - client, err := api.NewClient(config) - if err != nil { - return nil, err - } - client.SetToken(n.Cluster.GetRootToken()) - return client, nil -} - -// Cleanup kills the container of the node and deletes its data volume -func (n *DockerClusterNode) Cleanup() { - n.cleanup() -} - -// Stop kills the container of the node -func (n *DockerClusterNode) Stop() { - n.cleanupContainer() -} - -func (n *DockerClusterNode) cleanup() error { - if n.Container == nil || n.Container.ID == "" { - return nil - } - n.cleanupContainer() - n.cleanupVolume() - return nil -} - -func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error { - if n.DataVolumeName == "" { - vol, err := n.DockerAPI.VolumeCreate(ctx, volume.CreateOptions{}) - if err != nil { - return err - } - n.DataVolumeName = vol.Name - n.cleanupVolume = func() { - _ = 
n.DockerAPI.VolumeRemove(ctx, vol.Name, false) - } - } - vaultCfg := map[string]interface{}{} - vaultCfg["listener"] = map[string]interface{}{ - "tcp": map[string]interface{}{ - "address": fmt.Sprintf("%s:%d", "0.0.0.0", 8200), - "tls_cert_file": "/vault/config/cert.pem", - "tls_key_file": "/vault/config/key.pem", - "telemetry": map[string]interface{}{ - "unauthenticated_metrics_access": true, - }, - }, - } - vaultCfg["telemetry"] = map[string]interface{}{ - "disable_hostname": true, - } - raftOpts := map[string]interface{}{ - // TODO add options from vnc - "path": "/vault/file", - "node_id": n.NodeID, - } - vaultCfg["storage"] = map[string]interface{}{ - "raft": raftOpts, - } - if opts != nil && opts.VaultNodeConfig != nil && len(opts.VaultNodeConfig.StorageOptions) > 0 { - for k, v := range opts.VaultNodeConfig.StorageOptions { - if _, ok := raftOpts[k].(string); !ok { - raftOpts[k] = v - } - } - } - //// disable_mlock is required for working in the Docker environment with - //// custom plugins - vaultCfg["disable_mlock"] = true - vaultCfg["api_addr"] = `https://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8200` - vaultCfg["cluster_addr"] = `https://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8201` - - systemJSON, err := json.Marshal(vaultCfg) - if err != nil { - return err - } - err = os.WriteFile(filepath.Join(n.WorkDir, "system.json"), systemJSON, 0o644) - if err != nil { - return err - } - - if opts.VaultNodeConfig != nil { - localCfg := *opts.VaultNodeConfig - if opts.VaultNodeConfig.LicensePath != "" { - b, err := os.ReadFile(opts.VaultNodeConfig.LicensePath) - if err != nil || len(b) == 0 { - return fmt.Errorf("unable to read LicensePath at %q: %w", opts.VaultNodeConfig.LicensePath, err) - } - localCfg.LicensePath = "/vault/config/license" - dest := filepath.Join(n.WorkDir, "license") - err = os.WriteFile(dest, b, 0o644) - if err != nil { - return fmt.Errorf("error writing license to %q: %w", dest, err) 
- } - - } - userJSON, err := json.Marshal(localCfg) - if err != nil { - return err - } - err = os.WriteFile(filepath.Join(n.WorkDir, "user.json"), userJSON, 0o644) - if err != nil { - return err - } - } - - // Create a temporary cert so vault will start up - err = n.setupCert("127.0.0.1") - if err != nil { - return err - } - - caDir := filepath.Join(n.Cluster.tmpDir, "ca") - - // setup plugin bin copy if needed - copyFromTo := map[string]string{ - n.WorkDir: "/vault/config", - caDir: "/usr/local/share/ca-certificates/", - } - - var wg sync.WaitGroup - wg.Add(1) - var seenLogs uberAtomic.Bool - logConsumer := func(s string) { - if seenLogs.CAS(false, true) { - wg.Done() - } - n.Logger.Trace(s) - } - logStdout := &LogConsumerWriter{logConsumer} - logStderr := &LogConsumerWriter{func(s string) { - if seenLogs.CAS(false, true) { - wg.Done() - } - testcluster.JSONLogNoTimestamp(n.Logger, s) - }} - r, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{ - ImageRepo: n.ImageRepo, - ImageTag: n.ImageTag, - // We don't need to run update-ca-certificates in the container, because - // we're providing the CA in the raft join call, and otherwise Vault - // servers don't talk to one another on the API port. - Cmd: append([]string{"server"}, opts.Args...), - Env: []string{ - // For now we're using disable_mlock, because this is for testing - // anyway, and because it prevents us using external plugins. - "SKIP_SETCAP=true", - "VAULT_LOG_FORMAT=json", - "VAULT_LICENSE=" + opts.VaultLicense, - }, - Ports: []string{"8200/tcp", "8201/tcp"}, - ContainerName: n.Name(), - NetworkName: opts.NetworkName, - CopyFromTo: copyFromTo, - LogConsumer: logConsumer, - LogStdout: logStdout, - LogStderr: logStderr, - PreDelete: true, - DoNotAutoRemove: true, - PostStart: func(containerID string, realIP string) error { - err := n.setupCert(realIP) - if err != nil { - return err - } - - // If we signal Vault before it installs its sighup handler, it'll die. 
- wg.Wait() - n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP) - return n.runner.RefreshFiles(ctx, containerID) - }, - Capabilities: []string{"NET_ADMIN"}, - OmitLogTimestamps: true, - VolumeNameToMountPoint: map[string]string{ - n.DataVolumeName: "/vault/file", - }, - }) - if err != nil { - return err - } - n.runner = r - - probe := opts.StartProbe - if probe == nil { - probe = func(c *api.Client) error { - _, err = c.Sys().SealStatus() - return err - } - } - svc, _, err := r.StartNewService(ctx, false, false, func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) { - config, err := n.apiConfig() - if err != nil { - return nil, err - } - config.Address = fmt.Sprintf("https://%s:%d", host, port) - client, err := api.NewClient(config) - if err != nil { - return nil, err - } - err = probe(client) - if err != nil { - return nil, err - } - - return dockhelper.NewServiceHostPort(host, port), nil - }) - if err != nil { - return err - } - - n.HostPort = svc.Config.Address() - n.Container = svc.Container - netName := opts.NetworkName - if netName == "" { - if len(svc.Container.NetworkSettings.Networks) > 1 { - return fmt.Errorf("Set d.RunOptions.NetworkName instead for container with multiple networks: %v", svc.Container.NetworkSettings.Networks) - } - for netName = range svc.Container.NetworkSettings.Networks { - // Networks above is a map; we just need to find the first and - // only key of this map (network name). The range handles this - // for us, but we need a loop construction in order to use range. 
- } - } - n.ContainerNetworkName = netName - n.ContainerIPAddress = svc.Container.NetworkSettings.Networks[netName].IPAddress - n.RealAPIAddr = "https://" + n.ContainerIPAddress + ":8200" - n.cleanupContainer = svc.Cleanup - - client, err := n.newAPIClient() - if err != nil { - return err - } - client.SetToken(n.Cluster.rootToken) - n.client = client - return nil -} - -func (n *DockerClusterNode) Pause(ctx context.Context) error { - return n.DockerAPI.ContainerPause(ctx, n.Container.ID) -} - -func (n *DockerClusterNode) AddNetworkDelay(ctx context.Context, delay time.Duration, targetIP string) error { - ip := net.ParseIP(targetIP) - if ip == nil { - return fmt.Errorf("targetIP %q is not an IP address", targetIP) - } - // Let's attempt to get a unique handle for the filter rule; we'll assume that - // every targetIP has a unique last octet, which is true currently for how - // we're doing docker networking. - lastOctet := ip.To4()[3] - - stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ - "/bin/sh", - "-xec", strings.Join([]string{ - fmt.Sprintf("echo isolating node %s", targetIP), - "apk add iproute2", - // If we're running this script a second time on the same node, - // the add dev will fail; since we only want to run the netem - // command once, we'll do so in the case where the add dev doesn't fail. - "tc qdisc add dev eth0 root handle 1: prio && " + - fmt.Sprintf("tc qdisc add dev eth0 parent 1:1 handle 2: netem delay %dms", delay/time.Millisecond), - // Here we create a u32 filter as per https://man7.org/linux/man-pages/man8/tc-u32.8.html - // Its parent is 1:0 (which I guess is the root?) 
- // Its handle must be unique, so we base it on targetIP - fmt.Sprintf("tc filter add dev eth0 parent 1:0 protocol ip pref 55 handle ::%x u32 match ip dst %s flowid 2:1", lastOctet, targetIP), - }, "; "), - }) - if err != nil { - return err - } - - n.Logger.Trace(string(stdout)) - n.Logger.Trace(string(stderr)) - if exitCode != 0 { - return fmt.Errorf("got nonzero exit code from iptables: %d", exitCode) - } - return nil -} - -type LogConsumerWriter struct { - consumer func(string) -} - -func (l LogConsumerWriter) Write(p []byte) (n int, err error) { - // TODO this assumes that we're never passed partial log lines, which - // seems a safe assumption for now based on how docker looks to implement - // logging, but might change in the future. - scanner := bufio.NewScanner(bytes.NewReader(p)) - scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) - for scanner.Scan() { - l.consumer(scanner.Text()) - } - return len(p), nil -} - -// DockerClusterOptions has options for setting up the docker cluster -type DockerClusterOptions struct { - testcluster.ClusterOptions - CAKey *ecdsa.PrivateKey - NetworkName string - ImageRepo string - ImageTag string - CA *testcluster.CA - VaultBinary string - Args []string - StartProbe func(*api.Client) error -} - -func ensureLeaderMatches(ctx context.Context, client *api.Client, ready func(response *api.LeaderResponse) error) error { - var leader *api.LeaderResponse - var err error - for ctx.Err() == nil { - leader, err = client.Sys().Leader() - switch { - case err != nil: - case leader == nil: - err = fmt.Errorf("nil response to leader check") - default: - err = ready(leader) - if err == nil { - return nil - } - } - time.Sleep(500 * time.Millisecond) - } - return fmt.Errorf("error checking leader: %v", err) -} - -const DefaultNumCores = 3 - -// creates a managed docker container running Vault -func (dc *DockerCluster) setupDockerCluster(ctx context.Context, opts *DockerClusterOptions) error { - if opts.TmpDir != "" { - if _, err 
:= os.Stat(opts.TmpDir); os.IsNotExist(err) { - if err := os.MkdirAll(opts.TmpDir, 0o700); err != nil { - return err - } - } - dc.tmpDir = opts.TmpDir - } else { - tempDir, err := ioutil.TempDir("", "vault-test-cluster-") - if err != nil { - return err - } - dc.tmpDir = tempDir - } - caDir := filepath.Join(dc.tmpDir, "ca") - if err := os.MkdirAll(caDir, 0o755); err != nil { - return err - } - - var numCores int - if opts.NumCores == 0 { - numCores = DefaultNumCores - } else { - numCores = opts.NumCores - } - - if dc.CA == nil { - if err := dc.setupCA(opts); err != nil { - return err - } - } - dc.RootCAs = x509.NewCertPool() - dc.RootCAs.AddCert(dc.CA.CACert) - - for i := 0; i < numCores; i++ { - if err := dc.addNode(ctx, opts); err != nil { - return err - } - if opts.SkipInit { - continue - } - if i == 0 { - if err := dc.setupNode0(ctx); err != nil { - return nil - } - } else { - if err := dc.joinNode(ctx, i, 0); err != nil { - return err - } - } - } - - return nil -} - -func (dc *DockerCluster) AddNode(ctx context.Context, opts *DockerClusterOptions) error { - leaderIdx, err := testcluster.LeaderNode(ctx, dc) - if err != nil { - return err - } - if err := dc.addNode(ctx, opts); err != nil { - return err - } - - return dc.joinNode(ctx, len(dc.ClusterNodes)-1, leaderIdx) -} - -func (dc *DockerCluster) addNode(ctx context.Context, opts *DockerClusterOptions) error { - tag, err := dc.setupImage(ctx, opts) - if err != nil { - return err - } - i := len(dc.ClusterNodes) - nodeID := fmt.Sprintf("core-%d", i) - node := &DockerClusterNode{ - DockerAPI: dc.DockerAPI, - NodeID: nodeID, - Cluster: dc, - WorkDir: filepath.Join(dc.tmpDir, nodeID), - Logger: dc.Logger.Named(nodeID), - ImageRepo: opts.ImageRepo, - ImageTag: tag, - } - dc.ClusterNodes = append(dc.ClusterNodes, node) - if err := os.MkdirAll(node.WorkDir, 0o755); err != nil { - return err - } - if err := node.Start(ctx, opts); err != nil { - return err - } - return nil -} - -func (dc *DockerCluster) joinNode(ctx 
context.Context, nodeIdx int, leaderIdx int) error { - leader := dc.ClusterNodes[leaderIdx] - - if nodeIdx >= len(dc.ClusterNodes) { - return fmt.Errorf("invalid node %d", nodeIdx) - } - node := dc.ClusterNodes[nodeIdx] - client := node.APIClient() - - var resp *api.RaftJoinResponse - resp, err := client.Sys().RaftJoinWithContext(ctx, &api.RaftJoinRequest{ - // When running locally on a bridge network, the containers must use their - // actual (private) IP to talk to one another. Our code must instead use - // the portmapped address since we're not on their network in that case. - LeaderAPIAddr: leader.RealAPIAddr, - LeaderCACert: string(dc.CACertPEM), - LeaderClientCert: string(node.ServerCertPEM), - LeaderClientKey: string(node.ServerKeyPEM), - }) - if resp == nil || !resp.Joined { - return fmt.Errorf("nil or negative response from raft join request: %v", resp) - } - if err != nil { - return fmt.Errorf("failed to join cluster: %w", err) - } - - return testcluster.UnsealNode(ctx, dc, nodeIdx) -} - -func (dc *DockerCluster) setupImage(ctx context.Context, opts *DockerClusterOptions) (string, error) { - if opts == nil { - opts = &DockerClusterOptions{} - } - sourceTag := opts.ImageTag - if sourceTag == "" { - sourceTag = "latest" - } - - if opts.VaultBinary == "" { - return sourceTag, nil - } - - suffix := "testing" - if sha := os.Getenv("COMMIT_SHA"); sha != "" { - suffix = sha - } - tag := sourceTag + "-" + suffix - if _, ok := dc.builtTags[tag]; ok { - return tag, nil - } - - f, err := os.Open(opts.VaultBinary) - if err != nil { - return "", err - } - data, err := io.ReadAll(f) - if err != nil { - return "", err - } - bCtx := dockhelper.NewBuildContext() - bCtx["vault"] = &dockhelper.FileContents{ - Data: data, - Mode: 0o755, - } - - containerFile := fmt.Sprintf(` -FROM %s:%s -COPY vault /bin/vault -`, opts.ImageRepo, sourceTag) - - _, err = dockhelper.BuildImage(ctx, dc.DockerAPI, containerFile, bCtx, - dockhelper.BuildRemove(true), 
dockhelper.BuildForceRemove(true), - dockhelper.BuildPullParent(true), - dockhelper.BuildTags([]string{opts.ImageRepo + ":" + tag})) - if err != nil { - return "", err - } - dc.builtTags[tag] = struct{}{} - return tag, nil -} - -/* Notes on testing the non-bridge network case: -- you need the test itself to be running in a container so that it can use - the network; create the network using - docker network create testvault -- this means that you need to mount the docker socket in that test container, - but on macos there's stuff that prevents that from working; to hack that, - on the host run - sudo ln -s "$HOME/Library/Containers/com.docker.docker/Data/docker.raw.sock" /var/run/docker.sock.raw -- run the test container like - docker run --rm -it --network testvault \ - -v /var/run/docker.sock.raw:/var/run/docker.sock \ - -v $(pwd):/home/circleci/go/src/github.com/hashicorp/vault/ \ - -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - "docker.mirror.hashicorp.services/cimg/go:1.19.2" /bin/bash -- in the container you may need to chown/chmod /var/run/docker.sock; use `docker ps` - to test if it's working - -*/ diff --git a/sdk/helper/testcluster/docker/replication.go b/sdk/helper/testcluster/docker/replication.go deleted file mode 100644 index c313e7af4d8d2..0000000000000 --- a/sdk/helper/testcluster/docker/replication.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package docker - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/helper/testcluster" -) - -func DefaultOptions(t *testing.T) *DockerClusterOptions { - return &DockerClusterOptions{ - ImageRepo: "hashicorp/vault", - ImageTag: "latest", - VaultBinary: os.Getenv("VAULT_BINARY"), - ClusterOptions: testcluster.ClusterOptions{ - NumCores: 3, - ClusterName: strings.ReplaceAll(t.Name(), "/", "-"), - VaultNodeConfig: &testcluster.VaultNodeConfig{ - LogLevel: "TRACE", - }, - }, - } -} - -func NewReplicationSetDocker(t *testing.T, opts *DockerClusterOptions) (*testcluster.ReplicationSet, error) { - binary := os.Getenv("VAULT_BINARY") - if binary == "" { - t.Skip("only running docker test when $VAULT_BINARY present") - } - - r := &testcluster.ReplicationSet{ - Clusters: map[string]testcluster.VaultCluster{}, - Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()), - } - - // clusterName is used for container name as well. - // A container name should not exceed 64 chars. - // There are additional chars that are added to the name as well - // like "-A-core0". So, setting a max limit for a cluster name. 
- if len(opts.ClusterName) > MaxClusterNameLength { - return nil, fmt.Errorf("cluster name length exceeded the maximum allowed length of %v", MaxClusterNameLength) - } - - r.Builder = func(ctx context.Context, name string, baseLogger hclog.Logger) (testcluster.VaultCluster, error) { - myOpts := *opts - myOpts.Logger = baseLogger.Named(name) - if myOpts.ClusterName == "" { - myOpts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") - } - myOpts.ClusterName += "-" + strings.ReplaceAll(name, "/", "-") - myOpts.CA = r.CA - return NewTestDockerCluster(t, &myOpts), nil - } - - a, err := r.Builder(context.TODO(), "A", r.Logger) - if err != nil { - return nil, err - } - r.Clusters["A"] = a - r.CA = a.(*DockerCluster).CA - - return r, err -} diff --git a/sdk/helper/testcluster/exec.go b/sdk/helper/testcluster/exec.go deleted file mode 100644 index d91a3de034ac6..0000000000000 --- a/sdk/helper/testcluster/exec.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package testcluster - -import ( - "bufio" - "context" - "crypto/tls" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - "testing" - "time" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -type ExecDevCluster struct { - ID string - ClusterName string - ClusterNodes []*execDevClusterNode - CACertPEMFile string - barrierKeys [][]byte - recoveryKeys [][]byte - tmpDir string - clientAuthRequired bool - rootToken string - stop func() - stopCh chan struct{} - Logger log.Logger -} - -func (dc *ExecDevCluster) SetRootToken(token string) { - dc.rootToken = token -} - -func (dc *ExecDevCluster) NamedLogger(s string) log.Logger { - return dc.Logger.Named(s) -} - -var _ VaultCluster = &ExecDevCluster{} - -type ExecDevClusterOptions struct { - ClusterOptions - BinaryPath string - // this is -dev-listen-address, defaults to 
"127.0.0.1:8200" - BaseListenAddress string -} - -func NewTestExecDevCluster(t *testing.T, opts *ExecDevClusterOptions) *ExecDevCluster { - if opts == nil { - opts = &ExecDevClusterOptions{} - } - if opts.ClusterName == "" { - opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") - } - if opts.Logger == nil { - opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name()) // .Named("container") - } - if opts.VaultLicense == "" { - opts.VaultLicense = os.Getenv(EnvVaultLicenseCI) - } - - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - t.Cleanup(cancel) - - dc, err := NewExecDevCluster(ctx, opts) - if err != nil { - t.Fatal(err) - } - return dc -} - -func NewExecDevCluster(ctx context.Context, opts *ExecDevClusterOptions) (*ExecDevCluster, error) { - dc := &ExecDevCluster{ - ClusterName: opts.ClusterName, - stopCh: make(chan struct{}), - } - - if opts == nil { - opts = &ExecDevClusterOptions{} - } - if opts.NumCores == 0 { - opts.NumCores = 3 - } - if err := dc.setupExecDevCluster(ctx, opts); err != nil { - dc.Cleanup() - return nil, err - } - - return dc, nil -} - -func (dc *ExecDevCluster) setupExecDevCluster(ctx context.Context, opts *ExecDevClusterOptions) (retErr error) { - if opts == nil { - opts = &ExecDevClusterOptions{} - } - if opts.Logger == nil { - opts.Logger = log.NewNullLogger() - } - dc.Logger = opts.Logger - - if opts.TmpDir != "" { - if _, err := os.Stat(opts.TmpDir); os.IsNotExist(err) { - if err := os.MkdirAll(opts.TmpDir, 0o700); err != nil { - return err - } - } - dc.tmpDir = opts.TmpDir - } else { - tempDir, err := os.MkdirTemp("", "vault-test-cluster-") - if err != nil { - return err - } - dc.tmpDir = tempDir - } - - // This context is used to stop the subprocess - execCtx, cancel := context.WithCancel(context.Background()) - dc.stop = func() { - cancel() - close(dc.stopCh) - } - defer func() { - if retErr != nil { - cancel() - } - }() - - bin := opts.BinaryPath - if bin == "" { - bin = "vault" - } - - 
clusterJsonPath := filepath.Join(dc.tmpDir, "cluster.json") - args := []string{"server", "-dev", "-dev-cluster-json", clusterJsonPath} - switch { - case opts.NumCores == 3: - args = append(args, "-dev-three-node") - case opts.NumCores == 1: - args = append(args, "-dev-tls") - default: - return fmt.Errorf("NumCores=1 and NumCores=3 are the only supported options right now") - } - if opts.BaseListenAddress != "" { - args = append(args, "-dev-listen-address", opts.BaseListenAddress) - } - cmd := exec.CommandContext(execCtx, bin, args...) - cmd.Env = os.Environ() - cmd.Env = append(cmd.Env, "VAULT_LICENSE="+opts.VaultLicense) - cmd.Env = append(cmd.Env, "VAULT_LOG_FORMAT=json") - cmd.Env = append(cmd.Env, "VAULT_DEV_TEMP_DIR="+dc.tmpDir) - if opts.Logger != nil { - stdout, err := cmd.StdoutPipe() - if err != nil { - return err - } - go func() { - outlog := opts.Logger.Named("stdout") - scanner := bufio.NewScanner(stdout) - for scanner.Scan() { - outlog.Trace(scanner.Text()) - } - }() - stderr, err := cmd.StderrPipe() - if err != nil { - return err - } - go func() { - errlog := opts.Logger.Named("stderr") - scanner := bufio.NewScanner(stderr) - // The default buffer is 4k, and Vault can emit bigger log lines - scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) - for scanner.Scan() { - JSONLogNoTimestamp(errlog, scanner.Text()) - } - }() - } - - if err := cmd.Start(); err != nil { - return err - } - - for ctx.Err() == nil { - if b, err := os.ReadFile(clusterJsonPath); err == nil && len(b) > 0 { - var clusterJson ClusterJson - if err := jsonutil.DecodeJSON(b, &clusterJson); err != nil { - continue - } - dc.CACertPEMFile = clusterJson.CACertPath - dc.rootToken = clusterJson.RootToken - for i, node := range clusterJson.Nodes { - config := api.DefaultConfig() - config.Address = node.APIAddress - err := config.ConfigureTLS(&api.TLSConfig{ - CACert: clusterJson.CACertPath, - }) - if err != nil { - return err - } - client, err := api.NewClient(config) - if err != nil 
{ - return err - } - client.SetToken(dc.rootToken) - _, err = client.Sys().ListMounts() - if err != nil { - return err - } - - dc.ClusterNodes = append(dc.ClusterNodes, &execDevClusterNode{ - name: fmt.Sprintf("core-%d", i), - client: client, - }) - } - return nil - } - time.Sleep(500 * time.Millisecond) - } - return ctx.Err() -} - -type execDevClusterNode struct { - name string - client *api.Client -} - -var _ VaultClusterNode = &execDevClusterNode{} - -func (e *execDevClusterNode) Name() string { - return e.name -} - -func (e *execDevClusterNode) APIClient() *api.Client { - // We clone to ensure that whenever this method is called, the caller gets - // back a pristine client, without e.g. any namespace or token changes that - // might pollute a shared client. We clone the config instead of the - // client because (1) Client.clone propagates the replicationStateStore and - // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and - // (3) if clone returns an error, it doesn't feel as appropriate to panic - // below. Who knows why clone might return an error? - cfg := e.client.CloneConfig() - client, err := api.NewClient(cfg) - if err != nil { - // It seems fine to panic here, since this should be the same input - // we provided to NewClient when we were setup, and we didn't panic then. - // Better not to completely ignore the error though, suppose there's a - // bug in CloneConfig? 
- panic(fmt.Sprintf("NewClient error on cloned config: %v", err)) - } - client.SetToken(e.client.Token()) - return client -} - -func (e *execDevClusterNode) TLSConfig() *tls.Config { - return e.client.CloneConfig().TLSConfig() -} - -func (dc *ExecDevCluster) ClusterID() string { - return dc.ID -} - -func (dc *ExecDevCluster) Nodes() []VaultClusterNode { - ret := make([]VaultClusterNode, len(dc.ClusterNodes)) - for i := range dc.ClusterNodes { - ret[i] = dc.ClusterNodes[i] - } - return ret -} - -func (dc *ExecDevCluster) GetBarrierKeys() [][]byte { - return dc.barrierKeys -} - -func copyKey(key []byte) []byte { - result := make([]byte, len(key)) - copy(result, key) - return result -} - -func (dc *ExecDevCluster) GetRecoveryKeys() [][]byte { - ret := make([][]byte, len(dc.recoveryKeys)) - for i, k := range dc.recoveryKeys { - ret[i] = copyKey(k) - } - return ret -} - -func (dc *ExecDevCluster) GetBarrierOrRecoveryKeys() [][]byte { - return dc.GetBarrierKeys() -} - -func (dc *ExecDevCluster) SetBarrierKeys(keys [][]byte) { - dc.barrierKeys = make([][]byte, len(keys)) - for i, k := range keys { - dc.barrierKeys[i] = copyKey(k) - } -} - -func (dc *ExecDevCluster) SetRecoveryKeys(keys [][]byte) { - dc.recoveryKeys = make([][]byte, len(keys)) - for i, k := range keys { - dc.recoveryKeys[i] = copyKey(k) - } -} - -func (dc *ExecDevCluster) GetCACertPEMFile() string { - return dc.CACertPEMFile -} - -func (dc *ExecDevCluster) Cleanup() { - dc.stop() -} - -// GetRootToken returns the root token of the cluster, if set -func (dc *ExecDevCluster) GetRootToken() string { - return dc.rootToken -} diff --git a/sdk/helper/testcluster/logging.go b/sdk/helper/testcluster/logging.go deleted file mode 100644 index dda759c7f84f1..0000000000000 --- a/sdk/helper/testcluster/logging.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package testcluster - -import ( - "encoding/json" - "strings" - - "github.com/hashicorp/go-hclog" -) - -func JSONLogNoTimestamp(outlog hclog.Logger, text string) { - d := json.NewDecoder(strings.NewReader(text)) - m := map[string]interface{}{} - if err := d.Decode(&m); err != nil { - outlog.Error("failed to decode json output from dev vault", "error", err, "input", text) - return - } - - delete(m, "@timestamp") - message := m["@message"].(string) - delete(m, "@message") - level := m["@level"].(string) - delete(m, "@level") - if module, ok := m["@module"]; ok { - delete(m, "@module") - outlog = outlog.Named(module.(string)) - } - - var pairs []interface{} - for k, v := range m { - pairs = append(pairs, k, v) - } - - outlog.Log(hclog.LevelFromString(level), message, pairs...) -} diff --git a/sdk/helper/testcluster/replication.go b/sdk/helper/testcluster/replication.go deleted file mode 100644 index 46356deff1401..0000000000000 --- a/sdk/helper/testcluster/replication.go +++ /dev/null @@ -1,905 +0,0 @@ -package testcluster - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "strings" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/mapstructure" -) - -func GetPerformanceToken(pri VaultCluster, id, secondaryPublicKey string) (string, error) { - client := pri.Nodes()[0].APIClient() - req := map[string]interface{}{ - "id": id, - } - if secondaryPublicKey != "" { - req["secondary_public_key"] = secondaryPublicKey - } - secret, err := client.Logical().Write("sys/replication/performance/primary/secondary-token", req) - if err != nil { - return "", err - } - - if secondaryPublicKey != "" { - return secret.Data["token"].(string), nil - } - return secret.WrapInfo.Token, nil -} - -func EnablePerfPrimary(ctx context.Context, pri VaultCluster) 
error { - client := pri.Nodes()[0].APIClient() - _, err := client.Logical().WriteWithContext(ctx, "sys/replication/performance/primary/enable", nil) - if err != nil { - return err - } - - err = WaitForPerfReplicationState(ctx, pri, consts.ReplicationPerformancePrimary) - if err != nil { - return err - } - return WaitForActiveNodeAndPerfStandbys(ctx, pri) -} - -func WaitForPerfReplicationState(ctx context.Context, cluster VaultCluster, state consts.ReplicationState) error { - client := cluster.Nodes()[0].APIClient() - var health *api.HealthResponse - var err error - for ctx.Err() == nil { - health, err = client.Sys().HealthWithContext(ctx) - if err == nil && health.ReplicationPerformanceMode == state.GetPerformanceString() { - return nil - } - time.Sleep(500 * time.Millisecond) - } - if err == nil { - err = ctx.Err() - } - return err -} - -func EnablePerformanceSecondaryNoWait(ctx context.Context, perfToken string, pri, sec VaultCluster, updatePrimary bool) error { - postData := map[string]interface{}{ - "token": perfToken, - "ca_file": DefaultCAFile, - } - path := "sys/replication/performance/secondary/enable" - if updatePrimary { - path = "sys/replication/performance/secondary/update-primary" - } - err := WaitForActiveNodeAndPerfStandbys(ctx, sec) - if err != nil { - return err - } - _, err = sec.Nodes()[0].APIClient().Logical().Write(path, postData) - if err != nil { - return err - } - - return WaitForPerfReplicationState(ctx, sec, consts.ReplicationPerformanceSecondary) -} - -func EnablePerformanceSecondary(ctx context.Context, perfToken string, pri, sec VaultCluster, updatePrimary, skipPoisonPill bool) (string, error) { - if err := EnablePerformanceSecondaryNoWait(ctx, perfToken, pri, sec, updatePrimary); err != nil { - return "", err - } - if err := WaitForMatchingMerkleRoots(ctx, "sys/replication/performance/", pri, sec); err != nil { - return "", err - } - root, err := WaitForPerformanceSecondary(ctx, pri, sec, skipPoisonPill) - if err != nil { - return "", 
err - } - if err := WaitForPerfReplicationWorking(ctx, pri, sec); err != nil { - return "", err - } - return root, nil -} - -func WaitForMatchingMerkleRoots(ctx context.Context, endpoint string, pri, sec VaultCluster) error { - getRoot := func(mode string, cli *api.Client) (string, error) { - status, err := cli.Logical().Read(endpoint + "status") - if err != nil { - return "", err - } - if status == nil || status.Data == nil || status.Data["mode"] == nil { - return "", fmt.Errorf("got nil secret or data") - } - if status.Data["mode"].(string) != mode { - return "", fmt.Errorf("expected mode=%s, got %s", mode, status.Data["mode"].(string)) - } - return status.Data["merkle_root"].(string), nil - } - - secClient := sec.Nodes()[0].APIClient() - priClient := pri.Nodes()[0].APIClient() - for i := 0; i < 30; i++ { - secRoot, err := getRoot("secondary", secClient) - if err != nil { - return err - } - priRoot, err := getRoot("primary", priClient) - if err != nil { - return err - } - - if reflect.DeepEqual(priRoot, secRoot) { - return nil - } - time.Sleep(time.Second) - } - - return fmt.Errorf("roots did not become equal") -} - -func WaitForPerformanceWAL(ctx context.Context, pri, sec VaultCluster) error { - endpoint := "sys/replication/performance/" - if err := WaitForMatchingMerkleRoots(ctx, endpoint, pri, sec); err != nil { - return nil - } - getWAL := func(mode, walKey string, cli *api.Client) (int64, error) { - status, err := cli.Logical().Read(endpoint + "status") - if err != nil { - return 0, err - } - if status == nil || status.Data == nil || status.Data["mode"] == nil { - return 0, fmt.Errorf("got nil secret or data") - } - if status.Data["mode"].(string) != mode { - return 0, fmt.Errorf("expected mode=%s, got %s", mode, status.Data["mode"].(string)) - } - return status.Data[walKey].(json.Number).Int64() - } - - secClient := sec.Nodes()[0].APIClient() - priClient := pri.Nodes()[0].APIClient() - for ctx.Err() == nil { - secLastRemoteWAL, err := getWAL("secondary", 
"last_remote_wal", secClient) - if err != nil { - return err - } - priLastPerfWAL, err := getWAL("primary", "last_performance_wal", priClient) - if err != nil { - return err - } - - if secLastRemoteWAL >= priLastPerfWAL { - return nil - } - time.Sleep(time.Second) - } - - return fmt.Errorf("performance WALs on the secondary did not catch up with the primary, context err: %w", ctx.Err()) -} - -func WaitForPerformanceSecondary(ctx context.Context, pri, sec VaultCluster, skipPoisonPill bool) (string, error) { - if len(pri.GetRecoveryKeys()) > 0 { - sec.SetBarrierKeys(pri.GetRecoveryKeys()) - sec.SetRecoveryKeys(pri.GetRecoveryKeys()) - } else { - sec.SetBarrierKeys(pri.GetBarrierKeys()) - sec.SetRecoveryKeys(pri.GetBarrierKeys()) - } - - if len(sec.Nodes()) > 1 { - if skipPoisonPill { - // As part of prepareSecondary on the active node the keyring is - // deleted from storage. Its absence can cause standbys to seal - // themselves. But it's not reliable, so we'll seal them - // ourselves to force the issue. 
- for i := range sec.Nodes()[1:] { - if err := SealNode(ctx, sec, i+1); err != nil { - return "", err - } - } - } else { - // We want to make sure we unseal all the nodes so we first need to wait - // until two of the nodes seal due to the poison pill being written - if err := WaitForNCoresSealed(ctx, sec, len(sec.Nodes())-1); err != nil { - return "", err - } - } - } - if _, err := WaitForActiveNode(ctx, sec); err != nil { - return "", err - } - if err := UnsealAllNodes(ctx, sec); err != nil { - return "", err - } - - perfSecondaryRootToken, err := GenerateRoot(sec, GenerateRootRegular) - if err != nil { - return "", err - } - sec.SetRootToken(perfSecondaryRootToken) - if err := WaitForActiveNodeAndPerfStandbys(ctx, sec); err != nil { - return "", err - } - - return perfSecondaryRootToken, nil -} - -func WaitForPerfReplicationWorking(ctx context.Context, pri, sec VaultCluster) error { - priActiveIdx, err := WaitForActiveNode(ctx, pri) - if err != nil { - return err - } - secActiveIdx, err := WaitForActiveNode(ctx, sec) - if err != nil { - return err - } - - priClient, secClient := pri.Nodes()[priActiveIdx].APIClient(), sec.Nodes()[secActiveIdx].APIClient() - mountPoint, err := uuid.GenerateUUID() - if err != nil { - return err - } - err = priClient.Sys().Mount(mountPoint, &api.MountInput{ - Type: "kv", - Local: false, - }) - if err != nil { - return fmt.Errorf("unable to mount KV engine on primary") - } - - path := mountPoint + "/foo" - _, err = priClient.Logical().Write(path, map[string]interface{}{ - "bar": 1, - }) - if err != nil { - return fmt.Errorf("unable to write KV on primary", "path", path) - } - - for ctx.Err() == nil { - var secret *api.Secret - secret, err = secClient.Logical().Read(path) - if err == nil && secret != nil { - err = priClient.Sys().Unmount(mountPoint) - if err != nil { - return fmt.Errorf("unable to unmount KV engine on primary") - } - return nil - } - time.Sleep(100 * time.Millisecond) - } - if err == nil { - err = ctx.Err() - } - 
return fmt.Errorf("unable to read replicated KV on secondary, path=%s, err=%v", path, err) -} - -func SetupTwoClusterPerfReplication(ctx context.Context, pri, sec VaultCluster) error { - if err := EnablePerfPrimary(ctx, pri); err != nil { - return err - } - perfToken, err := GetPerformanceToken(pri, sec.ClusterID(), "") - if err != nil { - return err - } - - _, err = EnablePerformanceSecondary(ctx, perfToken, pri, sec, false, false) - return err -} - -// PassiveWaitForActiveNodeAndPerfStandbys should be used instead of -// WaitForActiveNodeAndPerfStandbys when you don't want to do any writes -// as a side-effect. This returns perfStandby nodes in the cluster and -// an error. -func PassiveWaitForActiveNodeAndPerfStandbys(ctx context.Context, pri VaultCluster) (VaultClusterNode, []VaultClusterNode, error) { - leaderNode, standbys, err := GetActiveAndStandbys(ctx, pri) - if err != nil { - return nil, nil, fmt.Errorf("failed to derive standby nodes, %w", err) - } - - for i, node := range standbys { - client := node.APIClient() - // Make sure we get perf standby nodes - if err = EnsureCoreIsPerfStandby(ctx, client); err != nil { - return nil, nil, fmt.Errorf("standby node %d is not a perfStandby, %w", i, err) - } - } - - return leaderNode, standbys, nil -} - -func GetActiveAndStandbys(ctx context.Context, cluster VaultCluster) (VaultClusterNode, []VaultClusterNode, error) { - var leaderIndex int - var err error - if leaderIndex, err = WaitForActiveNode(ctx, cluster); err != nil { - return nil, nil, err - } - - var leaderNode VaultClusterNode - var nodes []VaultClusterNode - for i, node := range cluster.Nodes() { - if i == leaderIndex { - leaderNode = node - continue - } - nodes = append(nodes, node) - } - - return leaderNode, nodes, nil -} - -func EnsureCoreIsPerfStandby(ctx context.Context, client *api.Client) error { - var err error - var health *api.HealthResponse - for ctx.Err() == nil { - health, err = client.Sys().HealthWithContext(ctx) - if err == nil && 
health.PerformanceStandby { - return nil - } - time.Sleep(time.Millisecond * 500) - } - if err == nil { - err = ctx.Err() - } - return err -} - -func WaitForDRReplicationState(ctx context.Context, cluster VaultCluster, state consts.ReplicationState) error { - client := cluster.Nodes()[0].APIClient() - var health *api.HealthResponse - var err error - for ctx.Err() == nil { - health, err = client.Sys().HealthWithContext(ctx) - if err == nil && health.ReplicationDRMode == state.GetDRString() { - return nil - } - time.Sleep(500 * time.Millisecond) - } - if err == nil { - err = ctx.Err() - } - return err -} - -func EnableDrPrimary(ctx context.Context, pri VaultCluster) error { - client := pri.Nodes()[0].APIClient() - _, err := client.Logical().Write("sys/replication/dr/primary/enable", nil) - if err != nil { - return err - } - - err = WaitForDRReplicationState(ctx, pri, consts.ReplicationDRPrimary) - if err != nil { - return err - } - return WaitForActiveNodeAndPerfStandbys(ctx, pri) -} - -func GenerateDRActivationToken(pri VaultCluster, id, secondaryPublicKey string) (string, error) { - client := pri.Nodes()[0].APIClient() - req := map[string]interface{}{ - "id": id, - } - if secondaryPublicKey != "" { - req["secondary_public_key"] = secondaryPublicKey - } - secret, err := client.Logical().Write("sys/replication/dr/primary/secondary-token", req) - if err != nil { - return "", err - } - - if secondaryPublicKey != "" { - return secret.Data["token"].(string), nil - } - return secret.WrapInfo.Token, nil -} - -func WaitForDRSecondary(ctx context.Context, pri, sec VaultCluster, skipPoisonPill bool) error { - if len(pri.GetRecoveryKeys()) > 0 { - sec.SetBarrierKeys(pri.GetRecoveryKeys()) - sec.SetRecoveryKeys(pri.GetRecoveryKeys()) - } else { - sec.SetBarrierKeys(pri.GetBarrierKeys()) - sec.SetRecoveryKeys(pri.GetBarrierKeys()) - } - - if len(sec.Nodes()) > 1 { - if skipPoisonPill { - // As part of prepareSecondary on the active node the keyring is - // deleted from storage. 
Its absence can cause standbys to seal - // themselves. But it's not reliable, so we'll seal them - // ourselves to force the issue. - for i := range sec.Nodes()[1:] { - if err := SealNode(ctx, sec, i+1); err != nil { - return err - } - } - } else { - // We want to make sure we unseal all the nodes so we first need to wait - // until two of the nodes seal due to the poison pill being written - if err := WaitForNCoresSealed(ctx, sec, len(sec.Nodes())-1); err != nil { - return err - } - } - } - if _, err := WaitForActiveNode(ctx, sec); err != nil { - return err - } - - // unseal nodes - for i := range sec.Nodes() { - if err := UnsealNode(ctx, sec, i); err != nil { - // Sometimes when we get here it's already unsealed on its own - // and then this fails for DR secondaries so check again - // The error is "path disabled in replication DR secondary mode". - if healthErr := NodeHealthy(ctx, sec, i); healthErr != nil { - // return the original error - return err - } - } - } - - sec.SetRootToken(pri.GetRootToken()) - - if _, err := WaitForActiveNode(ctx, sec); err != nil { - return err - } - - return nil -} - -func EnableDRSecondaryNoWait(ctx context.Context, sec VaultCluster, drToken string) error { - postData := map[string]interface{}{ - "token": drToken, - "ca_file": DefaultCAFile, - } - - _, err := sec.Nodes()[0].APIClient().Logical().Write("sys/replication/dr/secondary/enable", postData) - if err != nil { - return err - } - - return WaitForDRReplicationState(ctx, sec, consts.ReplicationDRSecondary) -} - -func WaitForReplicationStatus(ctx context.Context, client *api.Client, dr bool, accept func(map[string]interface{}) error) error { - url := "sys/replication/performance/status" - if dr { - url = "sys/replication/dr/status" - } - - var err error - var secret *api.Secret - for ctx.Err() == nil { - secret, err = client.Logical().Read(url) - if err == nil && secret != nil && secret.Data != nil { - if err = accept(secret.Data); err == nil { - return nil - } - } - 
time.Sleep(500 * time.Millisecond) - } - if err == nil { - err = ctx.Err() - } - - return fmt.Errorf("unable to get acceptable replication status: error=%v secret=%#v", err, secret) -} - -func WaitForDRReplicationWorking(ctx context.Context, pri, sec VaultCluster) error { - priClient := pri.Nodes()[0].APIClient() - secClient := sec.Nodes()[0].APIClient() - - // Make sure we've entered stream-wals mode - err := WaitForReplicationStatus(ctx, secClient, true, func(secret map[string]interface{}) error { - state := secret["state"] - if state == string("stream-wals") { - return nil - } - return fmt.Errorf("expected stream-wals replication state, got %v", state) - }) - if err != nil { - return err - } - - // Now write some data and make sure that we see last_remote_wal nonzero, i.e. - // at least one WAL has been streamed. - secret, err := priClient.Auth().Token().Create(&api.TokenCreateRequest{}) - if err != nil { - return err - } - - // Revoke the token since some tests won't be happy to see it. 
- err = priClient.Auth().Token().RevokeTree(secret.Auth.ClientToken) - if err != nil { - return err - } - - err = WaitForReplicationStatus(ctx, secClient, true, func(secret map[string]interface{}) error { - state := secret["state"] - if state != string("stream-wals") { - return fmt.Errorf("expected stream-wals replication state, got %v", state) - } - - if secret["last_remote_wal"] != nil { - lastRemoteWal, _ := secret["last_remote_wal"].(json.Number).Int64() - if lastRemoteWal <= 0 { - return fmt.Errorf("expected last_remote_wal to be greater than zero") - } - return nil - } - - return fmt.Errorf("replication seems to be still catching up, maybe need to wait more") - }) - if err != nil { - return err - } - return nil -} - -func EnableDrSecondary(ctx context.Context, pri, sec VaultCluster, drToken string) error { - err := EnableDRSecondaryNoWait(ctx, sec, drToken) - if err != nil { - return err - } - - if err = WaitForMatchingMerkleRoots(ctx, "sys/replication/dr/", pri, sec); err != nil { - return err - } - - err = WaitForDRSecondary(ctx, pri, sec, false) - if err != nil { - return err - } - - if err = WaitForDRReplicationWorking(ctx, pri, sec); err != nil { - return err - } - return nil -} - -func SetupTwoClusterDRReplication(ctx context.Context, pri, sec VaultCluster) error { - if err := EnableDrPrimary(ctx, pri); err != nil { - return err - } - - drToken, err := GenerateDRActivationToken(pri, sec.ClusterID(), "") - if err != nil { - return err - } - err = EnableDrSecondary(ctx, pri, sec, drToken) - if err != nil { - return err - } - return nil -} - -func DemoteDRPrimary(client *api.Client) error { - _, err := client.Logical().Write("sys/replication/dr/primary/demote", map[string]interface{}{}) - return err -} - -func createBatchToken(client *api.Client, path string) (string, error) { - // TODO: should these be more random in case more than one batch token needs to be created? 
- suffix := strings.Replace(path, "/", "", -1) - policyName := "path-batch-policy-" + suffix - roleName := "path-batch-role-" + suffix - - rules := fmt.Sprintf(`path "%s" { capabilities = [ "read", "update" ] }`, path) - - // create policy - _, err := client.Logical().Write("sys/policy/"+policyName, map[string]interface{}{ - "policy": rules, - }) - if err != nil { - return "", err - } - - // create a role - _, err = client.Logical().Write("auth/token/roles/"+roleName, map[string]interface{}{ - "allowed_policies": policyName, - "orphan": true, - "renewable": false, - "token_type": "batch", - }) - if err != nil { - return "", err - } - - // create batch token - secret, err := client.Logical().Write("auth/token/create/"+roleName, nil) - if err != nil { - return "", err - } - - return secret.Auth.ClientToken, nil -} - -// PromoteDRSecondaryWithBatchToken creates a batch token for DR promotion -// before promotion, it demotes the primary cluster. The primary cluster needs -// to be functional for the generation of the batch token -func PromoteDRSecondaryWithBatchToken(ctx context.Context, pri, sec VaultCluster) error { - client := pri.Nodes()[0].APIClient() - drToken, err := createBatchToken(client, "sys/replication/dr/secondary/promote") - if err != nil { - return err - } - - err = DemoteDRPrimary(client) - if err != nil { - return err - } - - return promoteDRSecondaryInternal(ctx, sec, drToken) -} - -// PromoteDRSecondary generates a DR operation token on the secondary using -// unseal/recovery keys. Therefore, the primary cluster could potentially -// be out of service. 
-func PromoteDRSecondary(ctx context.Context, sec VaultCluster) error { - // generate DR operation token to do update primary on vC to point to - // the new perfSec primary vD - drToken, err := GenerateRoot(sec, GenerateRootDR) - if err != nil { - return err - } - return promoteDRSecondaryInternal(ctx, sec, drToken) -} - -func promoteDRSecondaryInternal(ctx context.Context, sec VaultCluster, drToken string) error { - secClient := sec.Nodes()[0].APIClient() - - // Allow retries of 503s, e.g.: replication is still catching up, - // try again later or provide the "force" argument - oldMaxRetries := secClient.MaxRetries() - secClient.SetMaxRetries(10) - defer secClient.SetMaxRetries(oldMaxRetries) - resp, err := secClient.Logical().Write("sys/replication/dr/secondary/promote", map[string]interface{}{ - "dr_operation_token": drToken, - }) - if err != nil { - return err - } - if resp == nil { - return fmt.Errorf("nil status response during DR promotion") - } - - if _, err := WaitForActiveNode(ctx, sec); err != nil { - return err - } - - return WaitForDRReplicationState(ctx, sec, consts.ReplicationDRPrimary) -} - -func checkClusterAddr(ctx context.Context, pri, sec VaultCluster) error { - priClient := pri.Nodes()[0].APIClient() - priLeader, err := priClient.Sys().LeaderWithContext(ctx) - if err != nil { - return err - } - secClient := sec.Nodes()[0].APIClient() - endpoint := "sys/replication/dr/" - status, err := secClient.Logical().Read(endpoint + "status") - if err != nil { - return err - } - if status == nil || status.Data == nil { - return fmt.Errorf("got nil secret or data") - } - - var priAddrs []string - err = mapstructure.Decode(status.Data["known_primary_cluster_addrs"], &priAddrs) - if err != nil { - return err - } - if !strutil.StrListContains(priAddrs, priLeader.LeaderClusterAddress) { - return fmt.Errorf("failed to fine the expected primary cluster address %v in known_primary_cluster_addrs", priLeader.LeaderClusterAddress) - } - - return nil -} - -func 
UpdatePrimary(ctx context.Context, pri, sec VaultCluster) error { - // generate DR operation token to do update primary on vC to point to - // the new perfSec primary vD - rootToken, err := GenerateRoot(sec, GenerateRootDR) - if err != nil { - return err - } - - // secondary activation token - drToken, err := GenerateDRActivationToken(pri, sec.ClusterID(), "") - if err != nil { - return err - } - - // update-primary on vC (new perfSec Dr secondary) to point to - // the new perfSec Dr primary - secClient := sec.Nodes()[0].APIClient() - resp, err := secClient.Logical().Write("sys/replication/dr/secondary/update-primary", map[string]interface{}{ - "dr_operation_token": rootToken, - "token": drToken, - "ca_file": DefaultCAFile, - }) - if err != nil { - return err - } - if resp == nil { - return fmt.Errorf("nil status response during update primary") - } - - if _, err = WaitForActiveNode(ctx, sec); err != nil { - return err - } - - if err = WaitForDRReplicationState(ctx, sec, consts.ReplicationDRSecondary); err != nil { - return err - } - - if err = checkClusterAddr(ctx, pri, sec); err != nil { - return err - } - - return nil -} - -func SetupFourClusterReplication(ctx context.Context, pri, sec, pridr, secdr VaultCluster) error { - err := SetupTwoClusterPerfReplication(ctx, pri, sec) - if err != nil { - return err - } - err = SetupTwoClusterDRReplication(ctx, pri, pridr) - if err != nil { - return err - } - err = SetupTwoClusterDRReplication(ctx, sec, secdr) - if err != nil { - return err - } - return nil -} - -type ReplicationSet struct { - // By convention, we recommend the following naming scheme for - // clusters in this map: - // A: perf primary - // B: primary's DR - // C: first perf secondary of A - // D: C's DR - // E: second perf secondary of A - // F: E's DR - // ... etc. - // - // We use generic names rather than role-specific names because - // that's less confusing when promotions take place that result in role - // changes. 
In other words, if D gets promoted to replace C as a perf - // secondary, and C gets demoted and updated to become D's DR secondary, - // they should maintain their initial names of D and C throughout. - Clusters map[string]VaultCluster - Builder ClusterBuilder - Logger hclog.Logger - CA *CA -} - -type ClusterBuilder func(ctx context.Context, name string, logger hclog.Logger) (VaultCluster, error) - -func NewReplicationSet(b ClusterBuilder) (*ReplicationSet, error) { - return &ReplicationSet{ - Clusters: map[string]VaultCluster{}, - Builder: b, - Logger: hclog.NewNullLogger(), - }, nil -} - -func (r *ReplicationSet) StandardPerfReplication(ctx context.Context) error { - for _, name := range []string{"A", "C"} { - if _, ok := r.Clusters[name]; !ok { - cluster, err := r.Builder(ctx, name, r.Logger) - if err != nil { - return err - } - r.Clusters[name] = cluster - } - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - err := SetupTwoClusterPerfReplication(ctx, r.Clusters["A"], r.Clusters["C"]) - if err != nil { - return err - } - - return nil -} - -func (r *ReplicationSet) StandardDRReplication(ctx context.Context) error { - for _, name := range []string{"A", "B"} { - if _, ok := r.Clusters[name]; !ok { - cluster, err := r.Builder(ctx, name, r.Logger) - if err != nil { - return err - } - r.Clusters[name] = cluster - } - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - err := SetupTwoClusterDRReplication(ctx, r.Clusters["A"], r.Clusters["B"]) - if err != nil { - return err - } - - return nil -} - -func (r *ReplicationSet) GetFourReplicationCluster(ctx context.Context) error { - for _, name := range []string{"A", "B", "C", "D"} { - if _, ok := r.Clusters[name]; !ok { - cluster, err := r.Builder(ctx, name, r.Logger) - if err != nil { - return err - } - r.Clusters[name] = cluster - } - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - err 
:= SetupFourClusterReplication(ctx, r.Clusters["A"], r.Clusters["C"], r.Clusters["B"], r.Clusters["D"]) - if err != nil { - return err - } - return nil -} - -func (r *ReplicationSet) Cleanup() { - for _, cluster := range r.Clusters { - cluster.Cleanup() - } -} - -func WaitForPerfReplicationConnectionStatus(ctx context.Context, client *api.Client) error { - type Primary struct { - APIAddress string `mapstructure:"api_address"` - ConnectionStatus string `mapstructure:"connection_status"` - ClusterAddress string `mapstructure:"cluster_address"` - LastHeartbeat string `mapstructure:"last_heartbeat"` - } - type Status struct { - Primaries []Primary `mapstructure:"primaries"` - } - return WaitForPerfReplicationStatus(ctx, client, func(m map[string]interface{}) error { - var status Status - err := mapstructure.Decode(m, &status) - if err != nil { - return err - } - if len(status.Primaries) == 0 { - return fmt.Errorf("primaries is zero") - } - for _, v := range status.Primaries { - if v.ConnectionStatus == "connected" { - return nil - } - } - return fmt.Errorf("no primaries connected") - }) -} - -func WaitForPerfReplicationStatus(ctx context.Context, client *api.Client, accept func(map[string]interface{}) error) error { - var err error - var secret *api.Secret - for ctx.Err() == nil { - secret, err = client.Logical().Read("sys/replication/performance/status") - if err == nil && secret != nil && secret.Data != nil { - if err = accept(secret.Data); err == nil { - return nil - } - } - time.Sleep(500 * time.Millisecond) - } - return fmt.Errorf("unable to get acceptable replication status within allotted time: error=%v secret=%#v", err, secret) -} diff --git a/sdk/helper/testcluster/types.go b/sdk/helper/testcluster/types.go deleted file mode 100644 index 084413521ce88..0000000000000 --- a/sdk/helper/testcluster/types.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package testcluster - -import ( - "crypto/ecdsa" - "crypto/tls" - "crypto/x509" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" -) - -type VaultClusterNode interface { - APIClient() *api.Client - TLSConfig() *tls.Config -} - -type VaultCluster interface { - Nodes() []VaultClusterNode - GetBarrierKeys() [][]byte - GetRecoveryKeys() [][]byte - GetBarrierOrRecoveryKeys() [][]byte - SetBarrierKeys([][]byte) - SetRecoveryKeys([][]byte) - GetCACertPEMFile() string - Cleanup() - ClusterID() string - NamedLogger(string) hclog.Logger - SetRootToken(token string) - GetRootToken() string -} - -type VaultNodeConfig struct { - // Not configurable because cluster creator wants to control these: - // PluginDirectory string `hcl:"plugin_directory"` - // APIAddr string `hcl:"api_addr"` - // ClusterAddr string `hcl:"cluster_addr"` - // Storage *Storage `hcl:"-"` - // HAStorage *Storage `hcl:"-"` - // DisableMlock bool `hcl:"disable_mlock"` - // ClusterName string `hcl:"cluster_name"` - - // Not configurable yet: - // Listeners []*Listener `hcl:"-"` - // Seals []*KMS `hcl:"-"` - // Entropy *Entropy `hcl:"-"` - // Telemetry *Telemetry `hcl:"telemetry"` - // HCPLinkConf *HCPLinkConfig `hcl:"cloud"` - // PidFile string `hcl:"pid_file"` - // ServiceRegistrationType string - // ServiceRegistrationOptions map[string]string - - StorageOptions map[string]string - - DefaultMaxRequestDuration time.Duration `json:"default_max_request_duration"` - LogFormat string `json:"log_format"` - LogLevel string `json:"log_level"` - CacheSize int `json:"cache_size"` - DisableCache bool `json:"disable_cache"` - DisablePrintableCheck bool `json:"disable_printable_check"` - EnableUI bool `json:"ui"` - MaxLeaseTTL time.Duration `json:"max_lease_ttl"` - DefaultLeaseTTL time.Duration `json:"default_lease_ttl"` - ClusterCipherSuites string `json:"cluster_cipher_suites"` - PluginFileUid int `json:"plugin_file_uid"` - PluginFilePermissions int 
`json:"plugin_file_permissions"` - EnableRawEndpoint bool `json:"raw_storage_endpoint"` - DisableClustering bool `json:"disable_clustering"` - DisablePerformanceStandby bool `json:"disable_performance_standby"` - DisableSealWrap bool `json:"disable_sealwrap"` - DisableIndexing bool `json:"disable_indexing"` - DisableSentinelTrace bool `json:"disable_sentinel"` - EnableResponseHeaderHostname bool `json:"enable_response_header_hostname"` - LogRequestsLevel string `json:"log_requests_level"` - EnableResponseHeaderRaftNodeID bool `json:"enable_response_header_raft_node_id"` - LicensePath string `json:"license_path"` -} - -type ClusterNode struct { - APIAddress string `json:"api_address"` -} - -type ClusterJson struct { - Nodes []ClusterNode `json:"nodes"` - CACertPath string `json:"ca_cert_path"` - RootToken string `json:"root_token"` -} - -type ClusterOptions struct { - ClusterName string - KeepStandbysSealed bool - SkipInit bool - CACert []byte - NumCores int - TmpDir string - Logger hclog.Logger - VaultNodeConfig *VaultNodeConfig - VaultLicense string -} - -type CA struct { - CACert *x509.Certificate - CACertBytes []byte - CACertPEM []byte - CACertPEMFile string - CAKey *ecdsa.PrivateKey - CAKeyPEM []byte -} diff --git a/sdk/helper/testcluster/util.go b/sdk/helper/testcluster/util.go deleted file mode 100644 index 4ecf5f5338998..0000000000000 --- a/sdk/helper/testcluster/util.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package testcluster - -import ( - "context" - "encoding/base64" - "encoding/hex" - "fmt" - "sync/atomic" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/xor" -) - -// Note that OSS standbys will not accept seal requests. And ent perf standbys -// may fail it as well if they haven't yet been able to get "elected" as perf standbys. 
-func SealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error { - if nodeIdx >= len(cluster.Nodes()) { - return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) - } - node := cluster.Nodes()[nodeIdx] - client := node.APIClient() - - err := client.Sys().SealWithContext(ctx) - if err != nil { - return err - } - - return NodeSealed(ctx, cluster, nodeIdx) -} - -func SealAllNodes(ctx context.Context, cluster VaultCluster) error { - for i := range cluster.Nodes() { - if err := SealNode(ctx, cluster, i); err != nil { - return err - } - } - return nil -} - -func UnsealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error { - if nodeIdx >= len(cluster.Nodes()) { - return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) - } - node := cluster.Nodes()[nodeIdx] - client := node.APIClient() - - for _, key := range cluster.GetBarrierOrRecoveryKeys() { - _, err := client.Sys().UnsealWithContext(ctx, hex.EncodeToString(key)) - if err != nil { - return err - } - } - - return NodeHealthy(ctx, cluster, nodeIdx) -} - -func UnsealAllNodes(ctx context.Context, cluster VaultCluster) error { - for i := range cluster.Nodes() { - if err := UnsealNode(ctx, cluster, i); err != nil { - return err - } - } - return nil -} - -func NodeSealed(ctx context.Context, cluster VaultCluster, nodeIdx int) error { - if nodeIdx >= len(cluster.Nodes()) { - return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) - } - node := cluster.Nodes()[nodeIdx] - client := node.APIClient() - - var health *api.HealthResponse - var err error - for ctx.Err() == nil { - health, err = client.Sys().HealthWithContext(ctx) - switch { - case err != nil: - case !health.Sealed: - err = fmt.Errorf("unsealed: %#v", health) - default: - return nil - } - time.Sleep(500 * time.Millisecond) - } - return fmt.Errorf("node %d is not sealed: %v", nodeIdx, err) -} - -func WaitForNCoresSealed(ctx context.Context, cluster VaultCluster, n int) error { - ctx, cancel := context.WithCancel(ctx) - defer 
cancel() - - errs := make(chan error) - for i := range cluster.Nodes() { - go func(i int) { - var err error - for ctx.Err() == nil { - err = NodeSealed(ctx, cluster, i) - if err == nil { - errs <- nil - return - } - time.Sleep(100 * time.Millisecond) - } - if err == nil { - err = ctx.Err() - } - errs <- err - }(i) - } - - var merr *multierror.Error - var sealed int - for range cluster.Nodes() { - err := <-errs - if err != nil { - merr = multierror.Append(merr, err) - } else { - sealed++ - if sealed == n { - return nil - } - } - } - - return fmt.Errorf("%d cores were not sealed, errs: %v", n, merr.ErrorOrNil()) -} - -func NodeHealthy(ctx context.Context, cluster VaultCluster, nodeIdx int) error { - if nodeIdx >= len(cluster.Nodes()) { - return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) - } - node := cluster.Nodes()[nodeIdx] - client := node.APIClient() - - var health *api.HealthResponse - var err error - for ctx.Err() == nil { - health, err = client.Sys().HealthWithContext(ctx) - switch { - case err != nil: - case health == nil: - err = fmt.Errorf("nil response to health check") - case health.Sealed: - err = fmt.Errorf("sealed: %#v", health) - default: - return nil - } - time.Sleep(500 * time.Millisecond) - } - return fmt.Errorf("node %d is unhealthy: %v", nodeIdx, err) -} - -func LeaderNode(ctx context.Context, cluster VaultCluster) (int, error) { - for i, node := range cluster.Nodes() { - client := node.APIClient() - ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) - resp, err := client.Sys().LeaderWithContext(ctx) - cancel() - if err != nil || resp == nil || !resp.IsSelf { - continue - } - return i, nil - } - return -1, fmt.Errorf("no leader found") -} - -func WaitForActiveNode(ctx context.Context, cluster VaultCluster) (int, error) { - for ctx.Err() == nil { - if idx, _ := LeaderNode(ctx, cluster); idx != -1 { - return idx, nil - } - time.Sleep(500 * time.Millisecond) - } - return -1, ctx.Err() -} - -func 
WaitForActiveNodeAndPerfStandbys(ctx context.Context, cluster VaultCluster) error { - logger := cluster.NamedLogger("WaitForActiveNodeAndPerfStandbys") - // This WaitForActiveNode was added because after a Raft cluster is sealed - // and then unsealed, when it comes up it may have a different leader than - // Core0, making this helper fail. - // A sleep before calling WaitForActiveNodeAndPerfStandbys seems to sort - // things out, but so apparently does this. We should be able to eliminate - // this call to WaitForActiveNode by reworking the logic in this method. - if _, err := WaitForActiveNode(ctx, cluster); err != nil { - return err - } - - if len(cluster.Nodes()) == 1 { - return nil - } - - expectedStandbys := len(cluster.Nodes()) - 1 - - mountPoint, err := uuid.GenerateUUID() - if err != nil { - return err - } - leaderClient := cluster.Nodes()[0].APIClient() - - for ctx.Err() == nil { - err = leaderClient.Sys().MountWithContext(ctx, mountPoint, &api.MountInput{ - Type: "kv", - Local: true, - }) - if err == nil { - break - } - time.Sleep(1 * time.Second) - } - if err != nil { - return fmt.Errorf("unable to mount KV engine: %v", err) - } - path := mountPoint + "/waitforactivenodeandperfstandbys" - var standbys, actives int64 - errchan := make(chan error, len(cluster.Nodes())) - for i := range cluster.Nodes() { - go func(coreNo int) { - node := cluster.Nodes()[coreNo] - client := node.APIClient() - val := 1 - var err error - defer func() { - errchan <- err - }() - - var lastWAL uint64 - for ctx.Err() == nil { - _, err = leaderClient.Logical().WriteWithContext(ctx, path, map[string]interface{}{ - "bar": val, - }) - val++ - time.Sleep(250 * time.Millisecond) - if err != nil { - continue - } - var leader *api.LeaderResponse - leader, err = client.Sys().LeaderWithContext(ctx) - if err != nil { - continue - } - switch { - case leader.IsSelf: - logger.Trace("waiting for core", "core", coreNo, "isLeader", true) - atomic.AddInt64(&actives, 1) - return - case 
leader.PerfStandby && leader.PerfStandbyLastRemoteWAL > 0: - switch { - case lastWAL == 0: - lastWAL = leader.PerfStandbyLastRemoteWAL - logger.Trace("waiting for core", "core", coreNo, "lastRemoteWAL", leader.PerfStandbyLastRemoteWAL, "lastWAL", lastWAL) - case lastWAL < leader.PerfStandbyLastRemoteWAL: - logger.Trace("waiting for core", "core", coreNo, "lastRemoteWAL", leader.PerfStandbyLastRemoteWAL, "lastWAL", lastWAL) - atomic.AddInt64(&standbys, 1) - return - } - } - } - }(i) - } - - errs := make([]error, 0, len(cluster.Nodes())) - for range cluster.Nodes() { - errs = append(errs, <-errchan) - } - if actives != 1 || int(standbys) != expectedStandbys { - return fmt.Errorf("expected 1 active core and %d standbys, got %d active and %d standbys, errs: %v", - expectedStandbys, actives, standbys, errs) - } - - for ctx.Err() == nil { - err = leaderClient.Sys().UnmountWithContext(ctx, mountPoint) - if err == nil { - break - } - time.Sleep(time.Second) - } - if err != nil { - return fmt.Errorf("unable to unmount KV engine on primary") - } - return nil -} - -type GenerateRootKind int - -const ( - GenerateRootRegular GenerateRootKind = iota - GenerateRootDR - GenerateRecovery -) - -func GenerateRoot(cluster VaultCluster, kind GenerateRootKind) (string, error) { - // If recovery keys supported, use those to perform root token generation instead - keys := cluster.GetBarrierOrRecoveryKeys() - - client := cluster.Nodes()[0].APIClient() - - var err error - var status *api.GenerateRootStatusResponse - switch kind { - case GenerateRootRegular: - status, err = client.Sys().GenerateRootInit("", "") - case GenerateRootDR: - status, err = client.Sys().GenerateDROperationTokenInit("", "") - case GenerateRecovery: - status, err = client.Sys().GenerateRecoveryOperationTokenInit("", "") - } - if err != nil { - return "", err - } - - if status.Required > len(keys) { - return "", fmt.Errorf("need more keys than have, need %d have %d", status.Required, len(keys)) - } - - otp := 
status.OTP - - for i, key := range keys { - if i >= status.Required { - break - } - - strKey := base64.StdEncoding.EncodeToString(key) - switch kind { - case GenerateRootRegular: - status, err = client.Sys().GenerateRootUpdate(strKey, status.Nonce) - case GenerateRootDR: - status, err = client.Sys().GenerateDROperationTokenUpdate(strKey, status.Nonce) - case GenerateRecovery: - status, err = client.Sys().GenerateRecoveryOperationTokenUpdate(strKey, status.Nonce) - } - if err != nil { - return "", err - } - } - if !status.Complete { - return "", fmt.Errorf("generate root operation did not end successfully") - } - - tokenBytes, err := base64.RawStdEncoding.DecodeString(status.EncodedToken) - if err != nil { - return "", err - } - tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp)) - if err != nil { - return "", err - } - return string(tokenBytes), nil -} diff --git a/sdk/helper/testhelpers/output.go b/sdk/helper/testhelpers/output.go deleted file mode 100644 index 769a63a189498..0000000000000 --- a/sdk/helper/testhelpers/output.go +++ /dev/null @@ -1,81 +0,0 @@ -package testhelpers - -import ( - "crypto/sha256" - "fmt" - "reflect" - - "github.com/mitchellh/go-testing-interface" - "github.com/mitchellh/mapstructure" -) - -// ToMap renders an input value of any type as a map. This is intended for -// logging human-readable data dumps in test logs, so it uses the `json` -// tags on struct fields: this makes it easy to exclude `"-"` values that -// are typically not interesting, respect omitempty, etc. -// -// We also replace any []byte fields with a hash of their value. -// This is usually sufficient for test log purposes, and is a lot more readable -// than a big array of individual byte values like Go would normally stringify a -// byte slice. 
-func ToMap(in any) (map[string]any, error) { - temp := make(map[string]any) - cfg := &mapstructure.DecoderConfig{ - TagName: "json", - IgnoreUntaggedFields: true, - Result: &temp, - } - md, err := mapstructure.NewDecoder(cfg) - if err != nil { - return nil, err - } - err = md.Decode(in) - if err != nil { - return nil, err - } - - // mapstructure doesn't call the DecodeHook for each field when doing - // struct->map conversions, but it does for map->map, so call it a second - // time to convert each []byte field. - out := make(map[string]any) - md2, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - Result: &out, - DecodeHook: func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { - if from.Kind() != reflect.Slice || from.Elem().Kind() != reflect.Uint8 { - return data, nil - } - b := data.([]byte) - return fmt.Sprintf("%x", sha256.Sum256(b)), nil - }, - }) - if err != nil { - return nil, err - } - err = md2.Decode(temp) - if err != nil { - return nil, err - } - - return out, nil -} - -// ToString renders its input using ToMap, and returns a string containing the -// result or an error if that fails. -func ToString(in any) string { - m, err := ToMap(in) - if err != nil { - return err.Error() - } - return fmt.Sprintf("%v", m) -} - -// StringOrDie renders its input using ToMap, and returns a string containing the -// result. If rendering yields an error, calls t.Fatal. 
-func StringOrDie(t testing.T, in any) string { - t.Helper() - m, err := ToMap(in) - if err != nil { - t.Fatal(err) - } - return fmt.Sprintf("%v", m) -} diff --git a/sdk/helper/testhelpers/output_test.go b/sdk/helper/testhelpers/output_test.go deleted file mode 100644 index 257d9480942f6..0000000000000 --- a/sdk/helper/testhelpers/output_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package testhelpers - -import ( - "fmt" - "reflect" - "testing" -) - -func TestToMap(t *testing.T) { - type s struct { - A string `json:"a"` - B []byte `json:"b"` - C map[string]string `json:"c"` - D string `json:"-"` - } - type args struct { - in s - } - tests := []struct { - name string - args args - want string - wantErr bool - }{ - { - name: "basic", - args: args{s{A: "a", B: []byte("bytes"), C: map[string]string{"k": "v"}, D: "d"}}, - want: "map[a:a b:277089d91c0bdf4f2e6862ba7e4a07605119431f5d13f726dd352b06f1b206a9 c:map[k:v]]", - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m, err := ToMap(&tt.args.in) - if (err != nil) != tt.wantErr { - t.Errorf("ToMap() error = %v, wantErr %v", err, tt.wantErr) - return - } - got := fmt.Sprintf("%s", m) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("ToMap() got = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/sdk/helper/testhelpers/schema/response_validation.go b/sdk/helper/testhelpers/schema/response_validation.go deleted file mode 100644 index 430d1754a56f9..0000000000000 --- a/sdk/helper/testhelpers/schema/response_validation.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package schema - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - "testing" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -// ValidateResponse is a test helper that validates whether the given response -// object conforms to the response schema (schema.Fields). 
It cycles through -// the data map and validates conversions in the schema. In "strict" mode, this -// function will also ensure that the data map has all schema-required fields -// and does not have any fields outside of the schema. -func ValidateResponse(t *testing.T, schema *framework.Response, response *logical.Response, strict bool) { - t.Helper() - - if response != nil { - ValidateResponseData(t, schema, response.Data, strict) - } else { - ValidateResponseData(t, schema, nil, strict) - } -} - -// ValidateResponseData is a test helper that validates whether the given -// response data map conforms to the response schema (schema.Fields). It cycles -// through the data map and validates conversions in the schema. In "strict" -// mode, this function will also ensure that the data map has all schema's -// requred fields and does not have any fields outside of the schema. -func ValidateResponseData(t *testing.T, schema *framework.Response, data map[string]interface{}, strict bool) { - t.Helper() - - if err := validateResponseDataImpl( - schema, - data, - strict, - ); err != nil { - t.Fatalf("validation error: %v; response data: %#v", err, data) - } -} - -// validateResponseDataImpl is extracted so that it can be tested -func validateResponseDataImpl(schema *framework.Response, data map[string]interface{}, strict bool) error { - // nothing to validate - if schema == nil { - return nil - } - - // Certain responses may come through with non-2xx status codes. While - // these are not always errors (e.g. 3xx redirection codes), we don't - // consider them for the purposes of schema validation - if status, exists := data[logical.HTTPStatusCode]; exists { - s, ok := status.(int) - if ok && (s < 200 || s > 299) { - return nil - } - } - - // Marshal the data to JSON and back to convert the map's values into - // JSON strings expected by Validate() and ValidateStrict(). This is - // not efficient and is done for testing purposes only. 
- jsonBytes, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("failed to convert input to json: %w", err) - } - - var dataWithStringValues map[string]interface{} - if err := json.Unmarshal( - jsonBytes, - &dataWithStringValues, - ); err != nil { - return fmt.Errorf("failed to unmashal data: %w", err) - } - - // these are special fields that will not show up in the final response and - // should be ignored - for _, field := range []string{ - logical.HTTPContentType, - logical.HTTPRawBody, - logical.HTTPStatusCode, - logical.HTTPRawBodyAlreadyJSONDecoded, - logical.HTTPCacheControlHeader, - logical.HTTPPragmaHeader, - logical.HTTPWWWAuthenticateHeader, - } { - delete(dataWithStringValues, field) - - if _, ok := schema.Fields[field]; ok { - return fmt.Errorf("encountered a reserved field in response schema: %s", field) - } - } - - // Validate - fd := framework.FieldData{ - Raw: dataWithStringValues, - Schema: schema.Fields, - } - - if strict { - return fd.ValidateStrict() - } - - return fd.Validate() -} - -// FindResponseSchema is a test helper to extract response schema from the -// given framework path / operation. 
-func FindResponseSchema(t *testing.T, paths []*framework.Path, pathIdx int, operation logical.Operation) *framework.Response { - t.Helper() - - if pathIdx >= len(paths) { - t.Fatalf("path index %d is out of range", pathIdx) - } - - schemaPath := paths[pathIdx] - - return GetResponseSchema(t, schemaPath, operation) -} - -func GetResponseSchema(t *testing.T, path *framework.Path, operation logical.Operation) *framework.Response { - t.Helper() - - schemaOperation, ok := path.Operations[operation] - if !ok { - t.Fatalf( - "could not find response schema: %s: %q operation does not exist", - path.Pattern, - operation, - ) - } - - var schemaResponses []framework.Response - - for _, status := range []int{ - http.StatusOK, // 200 - http.StatusAccepted, // 202 - http.StatusNoContent, // 204 - } { - schemaResponses, ok = schemaOperation.Properties().Responses[status] - if ok { - break - } - } - - if len(schemaResponses) == 0 { - t.Fatalf( - "could not find response schema: %s: %q operation: no responses found", - path.Pattern, - operation, - ) - } - - return &schemaResponses[0] -} - -// ResponseValidatingCallback can be used in setting up a [vault.TestCluster] -// that validates every response against the openapi specifications. 
-// -// [vault.TestCluster]: https://pkg.go.dev/github.com/hashicorp/vault/vault#TestCluster -func ResponseValidatingCallback(t *testing.T) func(logical.Backend, *logical.Request, *logical.Response) { - type PathRouter interface { - Route(string) *framework.Path - } - - return func(b logical.Backend, req *logical.Request, resp *logical.Response) { - t.Helper() - - if b == nil { - t.Fatalf("non-nil backend required") - } - - backend, ok := b.(PathRouter) - if !ok { - t.Fatalf("could not cast %T to have `Route(string) *framework.Path`", b) - } - - // The full request path includes the backend but when passing to the - // backend, we have to trim the mount point: - // `sys/mounts/secret` -> `mounts/secret` - // `auth/token/create` -> `create` - requestPath := strings.TrimPrefix(req.Path, req.MountPoint) - - route := backend.Route(requestPath) - if route == nil { - t.Fatalf("backend %T could not find a route for %s", b, req.Path) - } - - ValidateResponse( - t, - GetResponseSchema(t, route, req.Operation), - resp, - true, - ) - } -} diff --git a/sdk/helper/testhelpers/schema/response_validation_test.go b/sdk/helper/testhelpers/schema/response_validation_test.go deleted file mode 100644 index 4f4aa8b1cc3c6..0000000000000 --- a/sdk/helper/testhelpers/schema/response_validation_test.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package schema - -import ( - "testing" - "time" - - "github.com/hashicorp/vault/sdk/framework" -) - -func TestValidateResponse(t *testing.T) { - cases := map[string]struct { - schema *framework.Response - response map[string]interface{} - strict bool - errorExpected bool - }{ - "nil schema, nil response, strict": { - schema: nil, - response: nil, - strict: true, - errorExpected: false, - }, - - "nil schema, nil response, not strict": { - schema: nil, - response: nil, - strict: false, - errorExpected: false, - }, - - "nil schema, good response, strict": { - schema: nil, - response: map[string]interface{}{ - "foo": "bar", - }, - strict: true, - errorExpected: false, - }, - - "nil schema, good response, not strict": { - schema: nil, - response: map[string]interface{}{ - "foo": "bar", - }, - strict: true, - errorExpected: false, - }, - - "nil schema fields, good response, strict": { - schema: &framework.Response{}, - response: map[string]interface{}{ - "foo": "bar", - }, - strict: true, - errorExpected: false, - }, - - "nil schema fields, good response, not strict": { - schema: &framework.Response{}, - response: map[string]interface{}{ - "foo": "bar", - }, - strict: true, - errorExpected: false, - }, - - "string schema field, string response, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "foo": { - Type: framework.TypeString, - }, - }, - }, - response: map[string]interface{}{ - "foo": "bar", - }, - strict: true, - errorExpected: false, - }, - - "string schema field, string response, not strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "foo": { - Type: framework.TypeString, - }, - }, - }, - response: map[string]interface{}{ - "foo": "bar", - }, - strict: false, - errorExpected: false, - }, - - "string schema not required field, empty response, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "foo": { - Type: 
framework.TypeString, - Required: false, - }, - }, - }, - response: map[string]interface{}{}, - strict: true, - errorExpected: false, - }, - - "string schema required field, empty response, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "foo": { - Type: framework.TypeString, - Required: true, - }, - }, - }, - response: map[string]interface{}{}, - strict: true, - errorExpected: true, - }, - - "string schema required field, empty response, not strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "foo": { - Type: framework.TypeString, - Required: true, - }, - }, - }, - response: map[string]interface{}{}, - strict: false, - errorExpected: false, - }, - - "string schema required field, nil response, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "foo": { - Type: framework.TypeString, - Required: true, - }, - }, - }, - response: nil, - strict: true, - errorExpected: true, - }, - - "string schema required field, nil response, not strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "foo": { - Type: framework.TypeString, - Required: true, - }, - }, - }, - response: nil, - strict: false, - errorExpected: false, - }, - - "empty schema, string response, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{}, - }, - response: map[string]interface{}{ - "foo": "bar", - }, - strict: true, - errorExpected: true, - }, - - "empty schema, string response, not strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{}, - }, - response: map[string]interface{}{ - "foo": "bar", - }, - strict: false, - errorExpected: false, - }, - - "time schema, string response, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "time": { - Type: framework.TypeTime, - Required: true, - }, - }, - }, - response: map[string]interface{}{ - "time": 
"2024-12-11T09:08:07Z", - }, - strict: true, - errorExpected: false, - }, - - "time schema, string response, not strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "time": { - Type: framework.TypeTime, - Required: true, - }, - }, - }, - response: map[string]interface{}{ - "time": "2024-12-11T09:08:07Z", - }, - strict: false, - errorExpected: false, - }, - - "time schema, time response, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "time": { - Type: framework.TypeTime, - Required: true, - }, - }, - }, - response: map[string]interface{}{ - "time": time.Date(2024, 12, 11, 9, 8, 7, 0, time.UTC), - }, - strict: true, - errorExpected: false, - }, - - "time schema, time response, not strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "time": { - Type: framework.TypeTime, - Required: true, - }, - }, - }, - response: map[string]interface{}{ - "time": time.Date(2024, 12, 11, 9, 8, 7, 0, time.UTC), - }, - strict: false, - errorExpected: false, - }, - - "empty schema, response has http_raw_body, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{}, - }, - response: map[string]interface{}{ - "http_raw_body": "foo", - }, - strict: true, - errorExpected: false, - }, - - "empty schema, response has http_raw_body, not strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{}, - }, - response: map[string]interface{}{ - "http_raw_body": "foo", - }, - strict: false, - errorExpected: false, - }, - - "string schema field, response has non-200 http_status_code, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "foo": { - Type: framework.TypeString, - }, - }, - }, - response: map[string]interface{}{ - "http_status_code": 304, - }, - strict: true, - errorExpected: false, - }, - - "string schema field, response has non-200 http_status_code, not strict": { - schema: 
&framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "foo": { - Type: framework.TypeString, - }, - }, - }, - response: map[string]interface{}{ - "http_status_code": 304, - }, - strict: false, - errorExpected: false, - }, - - "schema has http_raw_body, strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "http_raw_body": { - Type: framework.TypeString, - Required: false, - }, - }, - }, - response: map[string]interface{}{ - "http_raw_body": "foo", - }, - strict: true, - errorExpected: true, - }, - - "schema has http_raw_body, not strict": { - schema: &framework.Response{ - Fields: map[string]*framework.FieldSchema{ - "http_raw_body": { - Type: framework.TypeString, - Required: false, - }, - }, - }, - response: map[string]interface{}{ - "http_raw_body": "foo", - }, - strict: false, - errorExpected: true, - }, - } - - for name, tc := range cases { - name, tc := name, tc - t.Run(name, func(t *testing.T) { - t.Parallel() - - err := validateResponseDataImpl( - tc.schema, - tc.response, - tc.strict, - ) - if err == nil && tc.errorExpected == true { - t.Fatalf("expected an error, got nil") - } - if err != nil && tc.errorExpected == false { - t.Fatalf("unexpected error: %v", err) - } - }) - } -} diff --git a/sdk/helper/tlsutil/tlsutil.go b/sdk/helper/tlsutil/tlsutil.go index d91af3679e2e8..e1e9b9484bf4b 100644 --- a/sdk/helper/tlsutil/tlsutil.go +++ b/sdk/helper/tlsutil/tlsutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package tlsutil diff --git a/sdk/helper/tokenutil/tokenutil.go b/sdk/helper/tokenutil/tokenutil.go index 4319bd1823698..776b40501ed45 100644 --- a/sdk/helper/tokenutil/tokenutil.go +++ b/sdk/helper/tokenutil/tokenutil.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package tokenutil import ( @@ -78,9 +75,8 @@ func TokenFields() map[string]*framework.FieldSchema { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Generated Token's Bound CIDRs", - Group: "Tokens", - Description: "A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.", + Name: "Generated Token's Bound CIDRs", + Group: "Tokens", }, }, @@ -124,9 +120,8 @@ func TokenFields() map[string]*framework.FieldSchema { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", DisplayAttrs: &framework.DisplayAttributes{ - Name: "Generated Token's Policies", - Group: "Tokens", - Description: "A list of policies that will apply to the generated token for this user.", + Name: "Generated Token's Policies", + Group: "Tokens", }, }, diff --git a/sdk/helper/useragent/useragent.go b/sdk/helper/useragent/useragent.go index 53569e910a87a..e7e23ac2b6f58 100644 --- a/sdk/helper/useragent/useragent.go +++ b/sdk/helper/useragent/useragent.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package useragent import ( @@ -9,6 +6,7 @@ import ( "strings" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/version" ) var ( @@ -17,29 +15,25 @@ var ( // rt is the runtime - variable for tests. rt = runtime.Version() + + // versionFunc is the func that returns the current version. This is a + // function to take into account the different build processes and distinguish + // between enterprise and oss builds. + versionFunc = func() string { + return version.GetVersion().VersionNumber() + } ) // String returns the consistent user-agent string for Vault. -// Deprecated: use PluginString instead. 
// -// Example output: +// e.g. Vault/0.10.4 (+https://www.vaultproject.io/; go1.10.1) // -// Vault (+https://www.vaultproject.io/; go1.19.5) -// -// Given comments will be appended to the semicolon-delimited comment section: -// -// Vault (+https://www.vaultproject.io/; go1.19.5; comment-0; comment-1) +// Given comments will be appended to the semicolon-delimited comment section. // -// At one point the user-agent string returned contained the Vault -// version hardcoded into the vault/sdk/version/ package. This worked for builtin -// plugins that are compiled into the `vault` binary, in that it correctly described -// the version of that Vault binary. It did not work for external plugins: for them, -// the version will be based on the version stored in the sdk based on the -// contents of the external plugin's go.mod. We've kept the String method around -// to avoid breaking builds, but you should be using PluginString. +// e.g. Vault/0.10.4 (+https://www.vaultproject.io/; go1.10.1; comment-0; comment-1) func String(comments ...string) string { c := append([]string{"+" + projectURL, rt}, comments...) - return fmt.Sprintf("Vault (%s)", strings.Join(c, "; ")) + return fmt.Sprintf("Vault/%s (%s)", versionFunc(), strings.Join(c, "; ")) } // PluginString is usable by plugins to return a user-agent string reflecting diff --git a/sdk/helper/useragent/useragent_test.go b/sdk/helper/useragent/useragent_test.go index 4677bb62face3..f0d014f6c5708 100644 --- a/sdk/helper/useragent/useragent_test.go +++ b/sdk/helper/useragent/useragent_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package useragent import ( @@ -12,6 +9,7 @@ import ( func TestUserAgent(t *testing.T) { projectURL = "https://vault-test.com" rt = "go5.0" + versionFunc = func() string { return "1.2.3" } type args struct { comments []string @@ -24,21 +22,21 @@ func TestUserAgent(t *testing.T) { { name: "User agent", args: args{}, - want: "Vault (+https://vault-test.com; go5.0)", + want: "Vault/1.2.3 (+https://vault-test.com; go5.0)", }, { name: "User agent with additional comment", args: args{ comments: []string{"pid-abcdefg"}, }, - want: "Vault (+https://vault-test.com; go5.0; pid-abcdefg)", + want: "Vault/1.2.3 (+https://vault-test.com; go5.0; pid-abcdefg)", }, { name: "User agent with additional comments", args: args{ comments: []string{"pid-abcdefg", "cloud-provider"}, }, - want: "Vault (+https://vault-test.com; go5.0; pid-abcdefg; cloud-provider)", + want: "Vault/1.2.3 (+https://vault-test.com; go5.0; pid-abcdefg; cloud-provider)", }, } for _, tt := range tests { diff --git a/sdk/helper/wrapping/wrapinfo.go b/sdk/helper/wrapping/wrapinfo.go index 03a703013008d..8d8e63340f959 100644 --- a/sdk/helper/wrapping/wrapinfo.go +++ b/sdk/helper/wrapping/wrapinfo.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package wrapping import "time" diff --git a/sdk/helper/xor/xor.go b/sdk/helper/xor/xor.go index 098a673178558..a1f1e90bc156f 100644 --- a/sdk/helper/xor/xor.go +++ b/sdk/helper/xor/xor.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package xor import ( diff --git a/sdk/helper/xor/xor_test.go b/sdk/helper/xor/xor_test.go index 143345d9a5bde..f50f525ce6390 100644 --- a/sdk/helper/xor/xor_test.go +++ b/sdk/helper/xor/xor_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package xor import ( diff --git a/sdk/logical/acme_billing.go b/sdk/logical/acme_billing.go deleted file mode 100644 index 6e4f6ef398b83..0000000000000 --- a/sdk/logical/acme_billing.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package logical - -import "context" - -type ACMEBillingSystemView interface { - CreateActivityCountEventForIdentifiers(ctx context.Context, identifiers []string) error -} diff --git a/sdk/logical/audit.go b/sdk/logical/audit.go index 30c03e6113ac2..8ba70f37e01a4 100644 --- a/sdk/logical/audit.go +++ b/sdk/logical/audit.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical type LogInput struct { diff --git a/sdk/logical/auth.go b/sdk/logical/auth.go index 83d9daca12adc..51de20345fddd 100644 --- a/sdk/logical/auth.go +++ b/sdk/logical/auth.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/connection.go b/sdk/logical/connection.go index e590e6f59acc1..5be8630770794 100644 --- a/sdk/logical/connection.go +++ b/sdk/logical/connection.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/controlgroup.go b/sdk/logical/controlgroup.go index e166f00d1f884..2ed1b07688d9a 100644 --- a/sdk/logical/controlgroup.go +++ b/sdk/logical/controlgroup.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/error.go b/sdk/logical/error.go index 5605784b3e132..02f68dd9189c4 100644 --- a/sdk/logical/error.go +++ b/sdk/logical/error.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package logical import "errors" @@ -20,11 +17,6 @@ var ( // ErrPermissionDenied is returned if the client is not authorized ErrPermissionDenied = errors.New("permission denied") - // ErrInvalidCredentials is returned when the provided credentials are incorrect - // This is used internally for user lockout purposes. This is not seen externally. - // The status code returned does not change because of this error - ErrInvalidCredentials = errors.New("invalid credentials") - // ErrMultiAuthzPending is returned if the the request needs more // authorizations ErrMultiAuthzPending = errors.New("request needs further approval") diff --git a/sdk/logical/event.pb.go b/sdk/logical/event.pb.go deleted file mode 100644 index 22e908d91a343..0000000000000 --- a/sdk/logical/event.pb.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 -// source: sdk/logical/event.proto - -package logical - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - structpb "google.golang.org/protobuf/types/known/structpb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// EventPluginInfo contains data related to the plugin that generated an event. -type EventPluginInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The type of plugin this event originated from, i.e., "auth" or "secrets. 
- MountClass string `protobuf:"bytes,1,opt,name=mount_class,json=mountClass,proto3" json:"mount_class,omitempty"` - // Unique ID of the mount entry, e.g., "kv_957bb7d8" - MountAccessor string `protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` - // Mount path of the plugin this event originated from, e.g., "secret/" - MountPath string `protobuf:"bytes,3,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"` - // Plugin name that this event originated from, e.g., "kv" - Plugin string `protobuf:"bytes,4,opt,name=plugin,proto3" json:"plugin,omitempty"` - // Plugin version of the plugin this event originated from, e.g., "v0.13.3+builtin" - PluginVersion string `protobuf:"bytes,5,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"` - // Mount version that this event originated from, i.e., if KVv2, then "2". Usually empty. - Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` -} - -func (x *EventPluginInfo) Reset() { - *x = EventPluginInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_event_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EventPluginInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EventPluginInfo) ProtoMessage() {} - -func (x *EventPluginInfo) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_event_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EventPluginInfo.ProtoReflect.Descriptor instead. 
-func (*EventPluginInfo) Descriptor() ([]byte, []int) { - return file_sdk_logical_event_proto_rawDescGZIP(), []int{0} -} - -func (x *EventPluginInfo) GetMountClass() string { - if x != nil { - return x.MountClass - } - return "" -} - -func (x *EventPluginInfo) GetMountAccessor() string { - if x != nil { - return x.MountAccessor - } - return "" -} - -func (x *EventPluginInfo) GetMountPath() string { - if x != nil { - return x.MountPath - } - return "" -} - -func (x *EventPluginInfo) GetPlugin() string { - if x != nil { - return x.Plugin - } - return "" -} - -func (x *EventPluginInfo) GetPluginVersion() string { - if x != nil { - return x.PluginVersion - } - return "" -} - -func (x *EventPluginInfo) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -// EventData contains event data in a CloudEvents container. -type EventData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID identifies the event. It is required. The combination of - // CloudEvents Source (i.e., Vault cluster) + ID must be unique. - // Events with the same Source + ID can be assumed to be duplicates - // by consumers. - // Be careful when setting this manually that the ID contains enough - // entropy to be unique, or possibly that it is idempotent, such - // as a hash of other fields with sufficient uniqueness. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Arbitrary non-secret data. Optional. - Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` - // Any IDs that the event relates to, i.e., UUIDs, paths. - EntityIds []string `protobuf:"bytes,3,rep,name=entity_ids,json=entityIds,proto3" json:"entity_ids,omitempty"` - // Human-readable note. 
- Note string `protobuf:"bytes,4,opt,name=note,proto3" json:"note,omitempty"` -} - -func (x *EventData) Reset() { - *x = EventData{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_event_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EventData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EventData) ProtoMessage() {} - -func (x *EventData) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_event_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EventData.ProtoReflect.Descriptor instead. -func (*EventData) Descriptor() ([]byte, []int) { - return file_sdk_logical_event_proto_rawDescGZIP(), []int{1} -} - -func (x *EventData) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *EventData) GetMetadata() *structpb.Struct { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *EventData) GetEntityIds() []string { - if x != nil { - return x.EntityIds - } - return nil -} - -func (x *EventData) GetNote() string { - if x != nil { - return x.Note - } - return "" -} - -// EventReceived is used to consume events and includes additional metadata regarding -// the event type and plugin information. 
-type EventReceived struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Event *EventData `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` - // namespace path - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - EventType string `protobuf:"bytes,3,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` - PluginInfo *EventPluginInfo `protobuf:"bytes,4,opt,name=plugin_info,json=pluginInfo,proto3" json:"plugin_info,omitempty"` -} - -func (x *EventReceived) Reset() { - *x = EventReceived{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_event_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EventReceived) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EventReceived) ProtoMessage() {} - -func (x *EventReceived) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_event_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EventReceived.ProtoReflect.Descriptor instead. 
-func (*EventReceived) Descriptor() ([]byte, []int) { - return file_sdk_logical_event_proto_rawDescGZIP(), []int{2} -} - -func (x *EventReceived) GetEvent() *EventData { - if x != nil { - return x.Event - } - return nil -} - -func (x *EventReceived) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *EventReceived) GetEventType() string { - if x != nil { - return x.EventType - } - return "" -} - -func (x *EventReceived) GetPluginInfo() *EventPluginInfo { - if x != nil { - return x.PluginInfo - } - return nil -} - -var File_sdk_logical_event_proto protoreflect.FileDescriptor - -var file_sdk_logical_event_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, 0x69, 0x63, - 0x61, 0x6c, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0xd1, 0x01, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, - 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, 
0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x83, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, - 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x49, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x22, 0xb1, 0x01, 0x0a, 0x0d, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x05, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x6f, - 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, - 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 
0x69, 0x6e, - 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, - 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x28, - 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, - 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_sdk_logical_event_proto_rawDescOnce sync.Once - file_sdk_logical_event_proto_rawDescData = file_sdk_logical_event_proto_rawDesc -) - -func file_sdk_logical_event_proto_rawDescGZIP() []byte { - file_sdk_logical_event_proto_rawDescOnce.Do(func() { - file_sdk_logical_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_event_proto_rawDescData) - }) - return file_sdk_logical_event_proto_rawDescData -} - -var file_sdk_logical_event_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_sdk_logical_event_proto_goTypes = []interface{}{ - (*EventPluginInfo)(nil), // 0: logical.EventPluginInfo - (*EventData)(nil), // 1: logical.EventData - (*EventReceived)(nil), // 2: logical.EventReceived - (*structpb.Struct)(nil), // 3: google.protobuf.Struct -} -var file_sdk_logical_event_proto_depIdxs = []int32{ - 3, // 0: logical.EventData.metadata:type_name -> google.protobuf.Struct - 1, // 1: logical.EventReceived.event:type_name -> logical.EventData - 0, // 2: logical.EventReceived.plugin_info:type_name -> logical.EventPluginInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_sdk_logical_event_proto_init() } -func 
file_sdk_logical_event_proto_init() { - if File_sdk_logical_event_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_sdk_logical_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EventPluginInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_logical_event_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EventData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_logical_event_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EventReceived); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sdk_logical_event_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_sdk_logical_event_proto_goTypes, - DependencyIndexes: file_sdk_logical_event_proto_depIdxs, - MessageInfos: file_sdk_logical_event_proto_msgTypes, - }.Build() - File_sdk_logical_event_proto = out.File - file_sdk_logical_event_proto_rawDesc = nil - file_sdk_logical_event_proto_goTypes = nil - file_sdk_logical_event_proto_depIdxs = nil -} diff --git a/sdk/logical/event.proto b/sdk/logical/event.proto deleted file mode 100644 index 6e36e5e70f9ac..0000000000000 --- a/sdk/logical/event.proto +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -syntax = "proto3"; - -option go_package = "github.com/hashicorp/vault/sdk/logical"; - -package logical; - -import "google/protobuf/struct.proto"; - -// EventPluginInfo contains data related to the plugin that generated an event. -message EventPluginInfo { - // The type of plugin this event originated from, i.e., "auth" or "secrets. - string mount_class = 1; - // Unique ID of the mount entry, e.g., "kv_957bb7d8" - string mount_accessor = 2; - // Mount path of the plugin this event originated from, e.g., "secret/" - string mount_path = 3; - // Plugin name that this event originated from, e.g., "kv" - string plugin = 4; - // Plugin version of the plugin this event originated from, e.g., "v0.13.3+builtin" - string plugin_version = 5; - // Mount version that this event originated from, i.e., if KVv2, then "2". Usually empty. - string version = 6; -} - -// EventData contains event data in a CloudEvents container. -message EventData { - // ID identifies the event. It is required. The combination of - // CloudEvents Source (i.e., Vault cluster) + ID must be unique. - // Events with the same Source + ID can be assumed to be duplicates - // by consumers. - // Be careful when setting this manually that the ID contains enough - // entropy to be unique, or possibly that it is idempotent, such - // as a hash of other fields with sufficient uniqueness. - string id = 1; - // Arbitrary non-secret data. Optional. - google.protobuf.Struct metadata = 2; - // Any IDs that the event relates to, i.e., UUIDs, paths. - repeated string entity_ids = 3; - // Human-readable note. - string note = 4; -} - -// EventReceived is used to consume events and includes additional metadata regarding -// the event type and plugin information. 
-message EventReceived { - EventData event = 1; - // namespace path - string namespace = 2; - string event_type = 3; - EventPluginInfo plugin_info = 4; -} diff --git a/sdk/logical/events.go b/sdk/logical/events.go deleted file mode 100644 index cbd3f73690244..0000000000000 --- a/sdk/logical/events.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package logical - -import ( - "context" - - "github.com/hashicorp/go-uuid" -) - -// ID is an alias to GetId() for CloudEvents compatibility. -func (x *EventReceived) ID() string { - return x.Event.GetId() -} - -// NewEvent returns an event with a new, random EID. -func NewEvent() (*EventData, error) { - id, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - return &EventData{ - Id: id, - }, nil -} - -// EventType represents a topic, and is a wrapper around eventlogger.EventType. -type EventType string - -// EventSender sends events to the common event bus. -type EventSender interface { - Send(ctx context.Context, eventType EventType, event *EventData) error -} diff --git a/sdk/logical/identity.pb.go b/sdk/logical/identity.pb.go index fedc5f5c202cb..6c1c4b2c9b903 100644 --- a/sdk/logical/identity.pb.go +++ b/sdk/logical/identity.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: sdk/logical/identity.proto package logical @@ -321,7 +318,6 @@ type MFAMethodID struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` UsesPasscode bool `protobuf:"varint,3,opt,name=uses_passcode,json=usesPasscode,proto3" json:"uses_passcode,omitempty"` - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` } func (x *MFAMethodID) Reset() { @@ -377,13 +373,6 @@ func (x *MFAMethodID) GetUsesPasscode() bool { return false } -func (x *MFAMethodID) GetName() string { - if x != nil { - return x.Name - } - return "" -} - type MFAConstraintAny struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -548,35 +537,34 @@ var file_sdk_logical_identity_proto_rawDesc = []byte{ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x6a, 0x0a, 0x0b, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, + 0x01, 0x22, 0x56, 0x0a, 0x0b, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x75, 0x73, 0x65, - 0x73, 0x50, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, - 0x10, 0x4d, 0x46, 0x41, 0x43, 0x6f, 
0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, - 0x79, 0x12, 0x26, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x49, 0x44, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x0e, 0x4d, 0x46, - 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, - 0x6d, 0x66, 0x61, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x66, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x54, 0x0a, 0x0f, 0x6d, 0x66, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, - 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6c, 0x6f, - 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, - 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x66, 0x61, 0x43, 0x6f, 0x6e, - 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x1a, 0x5c, 0x0a, 0x13, 0x4d, 0x66, 0x61, 0x43, - 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x43, 0x6f, - 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, - 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 
0x69, 0x63, 0x61, 0x6c, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x50, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x4d, 0x46, 0x41, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x12, 0x26, 0x0a, + 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6c, 0x6f, 0x67, + 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, + 0x52, 0x03, 0x61, 0x6e, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x0e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x66, 0x61, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x6d, 0x66, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x54, + 0x0a, 0x0f, 0x6d, 0x66, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, + 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, + 0x69, 0x6e, 0x74, 0x73, 0x1a, 0x5c, 0x0a, 0x13, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, + 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6c, + 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, + 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/sdk/logical/identity.proto b/sdk/logical/identity.proto index 4a1f3413750d3..ea2e373b18c6d 100644 --- a/sdk/logical/identity.proto +++ b/sdk/logical/identity.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; option go_package = "github.com/hashicorp/vault/sdk/logical"; @@ -82,7 +79,6 @@ message MFAMethodID { string type = 1; string id = 2; bool uses_passcode = 3; - string name = 4; } message MFAConstraintAny { diff --git a/sdk/logical/lease.go b/sdk/logical/lease.go index e00fb52d64b17..97bbe4f6582bc 100644 --- a/sdk/logical/lease.go +++ b/sdk/logical/lease.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/lease_test.go b/sdk/logical/lease_test.go index aee2bbdbcb3bf..050b7db8e92bd 100644 --- a/sdk/logical/lease_test.go +++ b/sdk/logical/lease_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/logical.go b/sdk/logical/logical.go index 51928d6e67e1f..601148952f0fb 100644 --- a/sdk/logical/logical.go +++ b/sdk/logical/logical.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( @@ -108,9 +105,6 @@ type BackendConfig struct { // Config is the opaque user configuration provided when mounting Config map[string]string - - // EventsSender provides a mechanism to interact with Vault events. - EventsSender EventSender } // Factory is the factory function to create a logical backend. @@ -137,28 +131,6 @@ type Paths struct { // should be seal wrapped with extra encryption. 
It is exact matching // unless it ends with '/' in which case it will be treated as a prefix. SealWrapStorage []string - - // WriteForwardedStorage are storage paths that, when running on a PR - // Secondary cluster, cause a GRPC call up to the PR Primary cluster's - // active node to handle storage.Put(...) and storage.Delete(...) events. - // These paths MUST include a {{clusterId}} literal, which the write layer - // will resolve to this cluster's UUID ("replication set" identifier). - // storage.List(...) and storage.Get(...) operations occur from the - // locally replicated data set, but can use path template expansion to be - // identifier agnostic. - // - // These paths require careful considerations by developers to use. In - // particular, writes on secondary clusters will not appear (when a - // corresponding read is issued immediately after a write) until the - // replication from primary->secondary has occurred. This replication - // triggers an InvalidateKey(...) call on the secondary, which can be - // used to detect the write has finished syncing. However, this will - // likely occur after the request has finished, so it is important to - // not block on this occurring. - // - // On standby nodes, like all storage write operations, this will trigger - // an ErrReadOnly return. - WriteForwardedStorage []string } type Auditor interface { @@ -166,6 +138,11 @@ type Auditor interface { AuditResponse(ctx context.Context, input *LogInput) error } +// Externaler allows us to check if a backend is running externally (i.e., over GRPC) +type Externaler interface { + IsExternal() bool +} + type PluginVersion struct { Version string } diff --git a/sdk/logical/logical_storage.go b/sdk/logical/logical_storage.go index b4fbc2b72fdb3..16b85cd797e0e 100644 --- a/sdk/logical/logical_storage.go +++ b/sdk/logical/logical_storage.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/managed_key.go b/sdk/logical/managed_key.go index 04727f9d7f424..e892c9cce9483 100644 --- a/sdk/logical/managed_key.go +++ b/sdk/logical/managed_key.go @@ -1,14 +1,10 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( "context" "crypto" + "crypto/cipher" "io" - - wrapping "github.com/hashicorp/go-kms-wrapping/v2" ) type KeyUsage int @@ -20,7 +16,6 @@ const ( KeyUsageVerify KeyUsageWrap KeyUsageUnwrap - KeyUsageGenerateRandom ) type ManagedKey interface { @@ -39,11 +34,9 @@ type ManagedKey interface { } type ( - ManagedKeyConsumer func(context.Context, ManagedKey) error - ManagedSigningKeyConsumer func(context.Context, ManagedSigningKey) error - ManagedEncryptingKeyConsumer func(context.Context, ManagedEncryptingKey) error - ManagedMACKeyConsumer func(context.Context, ManagedMACKey) error - ManagedKeyRandomSourceConsumer func(context.Context, ManagedKeyRandomSource) error + ManagedKeyConsumer func(context.Context, ManagedKey) error + ManagedSigningKeyConsumer func(context.Context, ManagedSigningKey) error + ManagedEncryptingKeyConsumer func(context.Context, ManagedEncryptingKey) error ) type ManagedKeySystemView interface { @@ -66,12 +59,6 @@ type ManagedKeySystemView interface { // WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function, // with the same semantics as WithManagedKeyByUUID WithManagedEncryptingKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedEncryptingKeyConsumer) error - // WithManagedMACKeyByName retrieves an instantiated managed MAC key by name for consumption by the given function, - // with the same semantics as WithManagedKeyByName. 
- WithManagedMACKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedMACKeyConsumer) error - // WithManagedMACKeyByUUID retrieves an instantiated managed MAC key by UUID for consumption by the given function, - // with the same semantics as WithManagedKeyByUUID. - WithManagedMACKeyByUUID(ctx context.Context, keyUUID, backendUUID string, f ManagedMACKeyConsumer) error } type ManagedAsymmetricKey interface { @@ -106,20 +93,5 @@ type ManagedSigningKey interface { type ManagedEncryptingKey interface { ManagedKey - Encrypt(ctx context.Context, plaintext []byte, options ...wrapping.Option) ([]byte, error) - Decrypt(ctx context.Context, ciphertext []byte, options ...wrapping.Option) ([]byte, error) -} - -type ManagedMACKey interface { - ManagedKey - - // MAC generates a MAC tag using the provided algorithm for the provided value. - MAC(ctx context.Context, algorithm string, data []byte) ([]byte, error) -} - -type ManagedKeyRandomSource interface { - ManagedKey - - // GetRandomBytes returns a number (specified by the count parameter) of random bytes sourced from the target managed key. - GetRandomBytes(count int) ([]byte, error) + GetAEAD(iv []byte) (cipher.AEAD, error) } diff --git a/sdk/logical/plugin.pb.go b/sdk/logical/plugin.pb.go index 19b18d89e1862..03be5d3cba056 100644 --- a/sdk/logical/plugin.pb.go +++ b/sdk/logical/plugin.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: sdk/logical/plugin.proto package logical diff --git a/sdk/logical/plugin.proto b/sdk/logical/plugin.proto index 0eaa3c57c8492..f2df6c75d97c3 100644 --- a/sdk/logical/plugin.proto +++ b/sdk/logical/plugin.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; option go_package = "github.com/hashicorp/vault/sdk/logical"; diff --git a/sdk/logical/request.go b/sdk/logical/request.go index 8a6ac241fe803..d774fd176b4a7 100644 --- a/sdk/logical/request.go +++ b/sdk/logical/request.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( @@ -156,22 +153,6 @@ type Request struct { // backends can be tied to the mount it belongs to. MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""` - // mountRunningVersion is used internally to propagate the semantic version - // of the mounted plugin as reported by its vault.MountEntry to audit logging - mountRunningVersion string - - // mountRunningSha256 is used internally to propagate the encoded sha256 - // of the mounted plugin as reported its vault.MountEntry to audit logging - mountRunningSha256 string - - // mountIsExternalPlugin is used internally to propagate whether - // the backend of the mounted plugin is running externally (i.e., over GRPC) - // to audit logging - mountIsExternalPlugin bool - - // mountClass is used internally to propagate the mount class of the mounted plugin to audit logging - mountClass string - // WrapInfo contains requested response wrapping parameters WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""` @@ -243,9 +224,6 @@ type Request struct { // InboundSSCToken is the token that arrives on an inbound request, supplied // by the vault user. 
InboundSSCToken string - - // When a request has been forwarded, contains information of the host the request was forwarded 'from' - ForwardedFrom string `json:"forwarded_from,omitempty"` } // Clone returns a deep copy of the request by using copystructure @@ -302,38 +280,6 @@ func (r *Request) SentinelKeys() []string { } } -func (r *Request) MountRunningVersion() string { - return r.mountRunningVersion -} - -func (r *Request) SetMountRunningVersion(mountRunningVersion string) { - r.mountRunningVersion = mountRunningVersion -} - -func (r *Request) MountRunningSha256() string { - return r.mountRunningSha256 -} - -func (r *Request) SetMountRunningSha256(mountRunningSha256 string) { - r.mountRunningSha256 = mountRunningSha256 -} - -func (r *Request) MountIsExternalPlugin() bool { - return r.mountIsExternalPlugin -} - -func (r *Request) SetMountIsExternalPlugin(mountIsExternalPlugin bool) { - r.mountIsExternalPlugin = mountIsExternalPlugin -} - -func (r *Request) MountClass() string { - return r.mountClass -} - -func (r *Request) SetMountClass(mountClass string) { - r.mountClass = mountClass -} - func (r *Request) LastRemoteWAL() uint64 { return r.lastRemoteWAL } @@ -420,7 +366,6 @@ const ( HelpOperation = "help" AliasLookaheadOperation = "alias-lookahead" ResolveRoleOperation = "resolve-role" - HeaderOperation = "header" // The operations below are called globally, the path is less relevant. RevokeOperation Operation = "revoke" diff --git a/sdk/logical/response.go b/sdk/logical/response.go index 9ea5bf6c57274..19194f524871c 100644 --- a/sdk/logical/response.go +++ b/sdk/logical/response.go @@ -1,14 +1,9 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( - "bufio" "encoding/json" "errors" "fmt" - "net" "net/http" "strconv" "sync/atomic" @@ -97,8 +92,7 @@ func (r *Response) AddWarning(warning string) { // IsError returns true if this response seems to indicate an error. 
func (r *Response) IsError() bool { - // If the response data contains only an 'error' element, or an 'error' and a 'data' element only - return r != nil && r.Data != nil && r.Data["error"] != nil && (len(r.Data) == 1 || (r.Data["data"] != nil && len(r.Data) == 2)) + return r != nil && r.Data != nil && len(r.Data) == 1 && r.Data["error"] != nil } func (r *Response) Error() error { @@ -247,13 +241,6 @@ func NewStatusHeaderResponseWriter(w http.ResponseWriter, h map[string][]*Custom } } -func (w *StatusHeaderResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if h, ok := w.wrapped.(http.Hijacker); ok { - return h.Hijack() - } - return nil, nil, fmt.Errorf("could not hijack because wrapped connection is %T and it does not implement http.Hijacker", w.wrapped) -} - func (w *StatusHeaderResponseWriter) Wrapped() http.ResponseWriter { return w.wrapped } diff --git a/sdk/logical/response_util.go b/sdk/logical/response_util.go index aef0213ed9e82..7454189f1d7d7 100644 --- a/sdk/logical/response_util.go +++ b/sdk/logical/response_util.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( @@ -20,7 +17,7 @@ import ( func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { if err == nil && (resp == nil || !resp.IsError()) { switch { - case req.Operation == ReadOperation || req.Operation == HeaderOperation: + case req.Operation == ReadOperation: if resp == nil { return http.StatusNotFound, nil } @@ -76,21 +73,10 @@ func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { var allErrors error var codedErr *ReplicationCodedError errwrap.Walk(err, func(inErr error) { - // The Walk function does not just traverse leaves, and execute the - // callback function on the entire error first. 
So, if the error is - // of type multierror.Error, we may want to skip storing the entire - // error first to avoid adding duplicate errors when walking down - // the leaf errors - if _, ok := inErr.(*multierror.Error); ok { - return - } newErr, ok := inErr.(*ReplicationCodedError) if ok { codedErr = newErr } else { - // if the error is of type fmt.wrapError which is typically - // made by calling fmt.Errorf("... %w", err), allErrors will - // contain duplicated error messages allErrors = multierror.Append(allErrors, inErr) } }) @@ -136,8 +122,6 @@ func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { statusCode = http.StatusNotFound case errwrap.Contains(err, ErrRelativePath.Error()): statusCode = http.StatusBadRequest - case errwrap.Contains(err, ErrInvalidCredentials.Error()): - statusCode = http.StatusBadRequest } } @@ -196,23 +180,3 @@ func RespondError(w http.ResponseWriter, status int, err error) { enc := json.NewEncoder(w) enc.Encode(resp) } - -func RespondErrorAndData(w http.ResponseWriter, status int, data interface{}, err error) { - AdjustErrorStatusCode(&status, err) - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(status) - - type ErrorAndDataResponse struct { - Errors []string `json:"errors"` - Data interface{} `json:"data""` - } - resp := &ErrorAndDataResponse{Errors: make([]string, 0, 1)} - if err != nil { - resp.Errors = append(resp.Errors, err.Error()) - } - resp.Data = data - - enc := json.NewEncoder(w) - enc.Encode(resp) -} diff --git a/sdk/logical/response_util_test.go b/sdk/logical/response_util_test.go index eafaa2fc76109..823c4afbdc217 100644 --- a/sdk/logical/response_util_test.go +++ b/sdk/logical/response_util_test.go @@ -1,10 +1,6 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package logical import ( - "errors" "strings" "testing" ) @@ -42,14 +38,6 @@ func TestResponseUtil_RespondErrorCommon_basic(t *testing.T) { respErr: nil, expectedStatus: 404, }, - { - title: "Header not found", - req: &Request{ - Operation: HeaderOperation, - }, - respErr: nil, - expectedStatus: 404, - }, { title: "List with response and no keys", req: &Request{ @@ -72,17 +60,6 @@ func TestResponseUtil_RespondErrorCommon_basic(t *testing.T) { respErr: nil, expectedStatus: 0, }, - { - title: "Invalid Credentials error ", - respErr: ErrInvalidCredentials, - resp: &Response{ - Data: map[string]interface{}{ - "error": "error due to wrong credentials", - }, - }, - expectedErr: errors.New("error due to wrong credentials"), - expectedStatus: 400, - }, } for _, tc := range testCases { diff --git a/sdk/logical/secret.go b/sdk/logical/secret.go index e6b4d14d4732b..a2128d8689946 100644 --- a/sdk/logical/secret.go +++ b/sdk/logical/secret.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import "fmt" diff --git a/sdk/logical/storage.go b/sdk/logical/storage.go index 16ba60b94875c..0802ad01a0f62 100644 --- a/sdk/logical/storage.go +++ b/sdk/logical/storage.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( @@ -23,11 +20,6 @@ var ErrReadOnly = errors.New("cannot write to readonly storage") // storage while the backend is still being setup. var ErrSetupReadOnly = errors.New("cannot write to storage during setup") -// Plugins using Paths.WriteForwardedStorage will need to use this sentinel -// in their path to write cross-cluster. See the description of that parameter -// for more information. -const PBPWFClusterSentinel = "{{clusterId}}" - // Storage is the way that logical backends are able read/write data. 
type Storage interface { List(context.Context, string) ([]string, error) diff --git a/sdk/logical/storage_inmem.go b/sdk/logical/storage_inmem.go index 62ec58290a4b6..65368a070fe45 100644 --- a/sdk/logical/storage_inmem.go +++ b/sdk/logical/storage_inmem.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/storage_inmem_test.go b/sdk/logical/storage_inmem_test.go index 2ed776b20c38c..8e0964fd4af8f 100644 --- a/sdk/logical/storage_inmem_test.go +++ b/sdk/logical/storage_inmem_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/storage_test.go b/sdk/logical/storage_test.go index 1d6014dd9769c..3b96b4dbef340 100644 --- a/sdk/logical/storage_test.go +++ b/sdk/logical/storage_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/storage_view.go b/sdk/logical/storage_view.go index df40dca4fc53a..2cd07715c2ae4 100644 --- a/sdk/logical/storage_view.go +++ b/sdk/logical/storage_view.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/system_view.go b/sdk/logical/system_view.go index 7301c752a1625..4e5627b1c8805 100644 --- a/sdk/logical/system_view.go +++ b/sdk/logical/system_view.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( @@ -86,17 +83,9 @@ type SystemView interface { // PluginEnv returns Vault environment information used by plugins PluginEnv(context.Context) (*PluginEnvironment, error) - // VaultVersion returns the version string for the currently running Vault. - VaultVersion(context.Context) (string, error) - // GeneratePasswordFromPolicy generates a password from the policy referenced. 
// If the policy does not exist, this will return an error. GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) - - // ClusterID returns the replication ClusterID, for use with path-based - // write forwarding (WriteForwardedPaths). This value will be templated - // in for the {{cluterId}} sentinel. - ClusterID(ctx context.Context) (string, error) } type PasswordPolicy interface { @@ -124,10 +113,9 @@ type StaticSystemView struct { EntityVal *Entity GroupsVal []*Group Features license.Features + VaultVersion string PluginEnvironment *PluginEnvironment PasswordPolicies map[string]PasswordGenerator - VersionString string - ClusterUUID string } type noopAuditor struct{} @@ -216,10 +204,6 @@ func (d StaticSystemView) PluginEnv(_ context.Context) (*PluginEnvironment, erro return d.PluginEnvironment, nil } -func (d StaticSystemView) VaultVersion(_ context.Context) (string, error) { - return d.VersionString, nil -} - func (d StaticSystemView) GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) { select { case <-ctx.Done(): @@ -249,7 +233,3 @@ func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) { delete(d.PasswordPolicies, name) return existed } - -func (d StaticSystemView) ClusterID(ctx context.Context) (string, error) { - return d.ClusterUUID, nil -} diff --git a/sdk/logical/testing.go b/sdk/logical/testing.go index a173c7c5f7b29..765f09826d4f7 100644 --- a/sdk/logical/testing.go +++ b/sdk/logical/testing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package logical import ( @@ -76,7 +73,6 @@ func TestSystemView() *StaticSystemView { return &StaticSystemView{ DefaultLeaseTTLVal: defaultLeaseTTLVal, MaxLeaseTTLVal: maxLeaseTTLVal, - VersionString: "testVersionString", } } diff --git a/sdk/logical/token.go b/sdk/logical/token.go index a27a73a22dc27..ebebd4ad9ca7a 100644 --- a/sdk/logical/token.go +++ b/sdk/logical/token.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/token_test.go b/sdk/logical/token_test.go index 641d688b9dd1b..e44c707a51659 100644 --- a/sdk/logical/token_test.go +++ b/sdk/logical/token_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/translate_response.go b/sdk/logical/translate_response.go index ef5ba5f220724..de5ea8fdbe214 100644 --- a/sdk/logical/translate_response.go +++ b/sdk/logical/translate_response.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package logical import ( diff --git a/sdk/logical/version.pb.go b/sdk/logical/version.pb.go index 9962824cbb15d..7845aeaf5c372 100644 --- a/sdk/logical/version.pb.go +++ b/sdk/logical/version.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: sdk/logical/version.proto package logical diff --git a/sdk/logical/version.proto b/sdk/logical/version.proto index 860ddc54e2707..345051ae9de95 100644 --- a/sdk/logical/version.proto +++ b/sdk/logical/version.proto @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; package logical; diff --git a/sdk/physical/cache.go b/sdk/physical/cache.go index cc318a4c0eb52..ffac33189bbc0 100644 --- a/sdk/physical/cache.go +++ b/sdk/physical/cache.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package physical import ( @@ -32,6 +29,7 @@ var cacheExceptionsPaths = []string{ "sys/expire/", "core/poison-pill", "core/raft/tls", + "core/license", } // CacheRefreshContext returns a context with an added value denoting if the diff --git a/sdk/physical/encoding.go b/sdk/physical/encoding.go index 49e00ae6ace58..dbde84cc6dc46 100644 --- a/sdk/physical/encoding.go +++ b/sdk/physical/encoding.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package physical import ( diff --git a/sdk/physical/entry.go b/sdk/physical/entry.go index 1d907425dc08d..389fe6c81c144 100644 --- a/sdk/physical/entry.go +++ b/sdk/physical/entry.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package physical import ( diff --git a/sdk/physical/error.go b/sdk/physical/error.go index 4af7b7d639fcc..b547e4e4288d4 100644 --- a/sdk/physical/error.go +++ b/sdk/physical/error.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package physical import ( diff --git a/sdk/physical/file/file.go b/sdk/physical/file/file.go index d7ad9de3a2b09..e5e64e6efa41f 100644 --- a/sdk/physical/file/file.go +++ b/sdk/physical/file/file.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package file import ( diff --git a/sdk/physical/file/file_test.go b/sdk/physical/file/file_test.go index 14c33094dec48..724b8a012a66b 100644 --- a/sdk/physical/file/file_test.go +++ b/sdk/physical/file/file_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package file import ( diff --git a/sdk/physical/inmem/cache_test.go b/sdk/physical/inmem/cache_test.go index 3014fc1768da9..e6e6dabfe3760 100644 --- a/sdk/physical/inmem/cache_test.go +++ b/sdk/physical/inmem/cache_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package inmem import ( diff --git a/sdk/physical/inmem/inmem.go b/sdk/physical/inmem/inmem.go index e4fa1f69ba231..be16b4caa12fc 100644 --- a/sdk/physical/inmem/inmem.go +++ b/sdk/physical/inmem/inmem.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package inmem import ( diff --git a/sdk/physical/inmem/inmem_ha.go b/sdk/physical/inmem/inmem_ha.go index 1db26ca7461f3..64fcb3a66dce3 100644 --- a/sdk/physical/inmem/inmem_ha.go +++ b/sdk/physical/inmem/inmem_ha.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package inmem import ( diff --git a/sdk/physical/inmem/inmem_ha_test.go b/sdk/physical/inmem/inmem_ha_test.go index bb427a385e991..850d63a230be1 100644 --- a/sdk/physical/inmem/inmem_ha_test.go +++ b/sdk/physical/inmem/inmem_ha_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package inmem import ( diff --git a/sdk/physical/inmem/inmem_test.go b/sdk/physical/inmem/inmem_test.go index 56c029a43303b..678061326a218 100644 --- a/sdk/physical/inmem/inmem_test.go +++ b/sdk/physical/inmem/inmem_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package inmem import ( diff --git a/sdk/physical/inmem/physical_view_test.go b/sdk/physical/inmem/physical_view_test.go index 24b47d7ae7ec9..ea4a3ce24f187 100644 --- a/sdk/physical/inmem/physical_view_test.go +++ b/sdk/physical/inmem/physical_view_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package inmem import ( diff --git a/sdk/physical/inmem/transactions_test.go b/sdk/physical/inmem/transactions_test.go index 71a4829f96647..7ed3d59492412 100644 --- a/sdk/physical/inmem/transactions_test.go +++ b/sdk/physical/inmem/transactions_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package inmem import ( diff --git a/sdk/physical/latency.go b/sdk/physical/latency.go index f4cced5270b19..c00780598cf47 100644 --- a/sdk/physical/latency.go +++ b/sdk/physical/latency.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package physical import ( diff --git a/sdk/physical/physical.go b/sdk/physical/physical.go index c0e7d2ef8895c..808abd50fcd85 100644 --- a/sdk/physical/physical.go +++ b/sdk/physical/physical.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package physical import ( diff --git a/sdk/physical/physical_access.go b/sdk/physical/physical_access.go index 048ee83856670..7497313afca22 100644 --- a/sdk/physical/physical_access.go +++ b/sdk/physical/physical_access.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package physical import ( diff --git a/sdk/physical/physical_view.go b/sdk/physical/physical_view.go index 0369e13778a04..189ac93172a53 100644 --- a/sdk/physical/physical_view.go +++ b/sdk/physical/physical_view.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package physical import ( diff --git a/sdk/physical/testing.go b/sdk/physical/testing.go index 0c6a021d3d237..6e0ddfcc0eae0 100644 --- a/sdk/physical/testing.go +++ b/sdk/physical/testing.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package physical import ( diff --git a/sdk/physical/transactions.go b/sdk/physical/transactions.go index 8d4e33321e2cb..a943c6bd95efa 100644 --- a/sdk/physical/transactions.go +++ b/sdk/physical/transactions.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package physical import ( diff --git a/sdk/plugin/backend.go b/sdk/plugin/backend.go index 2da1378eaa91a..46e3710fdbace 100644 --- a/sdk/plugin/backend.go +++ b/sdk/plugin/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/sdk/plugin/grpc_backend.go b/sdk/plugin/grpc_backend.go index f0114b9465790..a65eeebeb4322 100644 --- a/sdk/plugin/grpc_backend.go +++ b/sdk/plugin/grpc_backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/sdk/plugin/grpc_backend_client.go b/sdk/plugin/grpc_backend_client.go index a343356d19d86..91503f176e703 100644 --- a/sdk/plugin/grpc_backend_client.go +++ b/sdk/plugin/grpc_backend_client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( @@ -127,11 +124,10 @@ func (b *backendGRPCPluginClient) SpecialPaths() *logical.Paths { } return &logical.Paths{ - Root: reply.Paths.Root, - Unauthenticated: reply.Paths.Unauthenticated, - LocalStorage: reply.Paths.LocalStorage, - SealWrapStorage: reply.Paths.SealWrapStorage, - WriteForwardedStorage: reply.Paths.WriteForwardedStorage, + Root: reply.Paths.Root, + Unauthenticated: reply.Paths.Unauthenticated, + LocalStorage: reply.Paths.LocalStorage, + SealWrapStorage: reply.Paths.SealWrapStorage, } } @@ -231,10 +227,6 @@ func (b *backendGRPCPluginClient) Setup(ctx context.Context, config *logical.Bac impl: sysViewImpl, } - events := &GRPCEventsServer{ - impl: config.EventsSender, - } - // Register the server in this closure. 
serverFunc := func(opts []grpc.ServerOption) *grpc.Server { opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32)) @@ -243,7 +235,6 @@ func (b *backendGRPCPluginClient) Setup(ctx context.Context, config *logical.Bac s := grpc.NewServer(opts...) pb.RegisterSystemViewServer(s, sysView) pb.RegisterStorageServer(s, storage) - pb.RegisterEventsServer(s, events) b.server.Store(s) close(b.cleanupCh) return s diff --git a/sdk/plugin/grpc_backend_server.go b/sdk/plugin/grpc_backend_server.go index 3356e463c8ad8..361f3171f6734 100644 --- a/sdk/plugin/grpc_backend_server.go +++ b/sdk/plugin/grpc_backend_server.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( @@ -95,15 +92,13 @@ func (b *backendGRPCPluginServer) Setup(ctx context.Context, args *pb.SetupArgs) storage := newGRPCStorageClient(brokeredClient) sysView := newGRPCSystemView(brokeredClient) - events := newGRPCEventsClient(brokeredClient) config := &logical.BackendConfig{ - StorageView: storage, - Logger: b.logger, - System: sysView, - Config: args.Config, - BackendUUID: args.BackendUUID, - EventsSender: events, + StorageView: storage, + Logger: b.logger, + System: sysView, + Config: args.Config, + BackendUUID: args.BackendUUID, } // Call the underlying backend factory after shims have been created @@ -191,11 +186,10 @@ func (b *backendGRPCPluginServer) SpecialPaths(ctx context.Context, args *pb.Emp return &pb.SpecialPathsReply{ Paths: &pb.Paths{ - Root: paths.Root, - Unauthenticated: paths.Unauthenticated, - LocalStorage: paths.LocalStorage, - SealWrapStorage: paths.SealWrapStorage, - WriteForwardedStorage: paths.WriteForwardedStorage, + Root: paths.Root, + Unauthenticated: paths.Unauthenticated, + LocalStorage: paths.LocalStorage, + SealWrapStorage: paths.SealWrapStorage, }, }, nil } diff --git a/sdk/plugin/grpc_backend_test.go b/sdk/plugin/grpc_backend_test.go index 01a6ea609f7d6..2f665beb044ab 100644 --- a/sdk/plugin/grpc_backend_test.go +++ 
b/sdk/plugin/grpc_backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/sdk/plugin/grpc_events.go b/sdk/plugin/grpc_events.go deleted file mode 100644 index 05d788c66cd0a..0000000000000 --- a/sdk/plugin/grpc_events.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "context" - - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/plugin/pb" - "google.golang.org/grpc" -) - -func newGRPCEventsClient(conn *grpc.ClientConn) *GRPCEventsClient { - return &GRPCEventsClient{ - client: pb.NewEventsClient(conn), - } -} - -type GRPCEventsClient struct { - client pb.EventsClient -} - -var _ logical.EventSender = (*GRPCEventsClient)(nil) - -func (s *GRPCEventsClient) Send(ctx context.Context, eventType logical.EventType, event *logical.EventData) error { - _, err := s.client.SendEvent(ctx, &pb.SendEventRequest{ - EventType: string(eventType), - Event: event, - }) - return err -} - -type GRPCEventsServer struct { - pb.UnimplementedEventsServer - impl logical.EventSender -} - -func (s *GRPCEventsServer) SendEvent(ctx context.Context, req *pb.SendEventRequest) (*pb.Empty, error) { - if s.impl == nil { - return &pb.Empty{}, nil - } - - err := s.impl.Send(ctx, logical.EventType(req.EventType), req.Event) - if err != nil { - return nil, err - } - return &pb.Empty{}, nil -} diff --git a/sdk/plugin/grpc_storage.go b/sdk/plugin/grpc_storage.go index 5c2f0de3f4f05..6a04b3a976871 100644 --- a/sdk/plugin/grpc_storage.go +++ b/sdk/plugin/grpc_storage.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/sdk/plugin/grpc_system.go b/sdk/plugin/grpc_system.go index bf4537bd58f74..5ab680a5d1f1f 100644 --- a/sdk/plugin/grpc_system.go +++ b/sdk/plugin/grpc_system.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package plugin import ( @@ -29,8 +26,6 @@ func newGRPCSystemView(conn *grpc.ClientConn) *gRPCSystemViewClient { } } -var _ logical.SystemView = &gRPCSystemViewClient{} - type gRPCSystemViewClient struct { client pb.SystemViewClient } @@ -182,15 +177,6 @@ func (s *gRPCSystemViewClient) PluginEnv(ctx context.Context) (*logical.PluginEn return reply.PluginEnvironment, nil } -func (s *gRPCSystemViewClient) VaultVersion(ctx context.Context) (string, error) { - reply, err := s.client.PluginEnv(ctx, &pb.Empty{}) - if err != nil { - return "", err - } - - return reply.PluginEnvironment.VaultVersion, nil -} - func (s *gRPCSystemViewClient) GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) { req := &pb.GeneratePasswordFromPolicyRequest{ PolicyName: policyName, @@ -202,15 +188,6 @@ func (s *gRPCSystemViewClient) GeneratePasswordFromPolicy(ctx context.Context, p return resp.Password, nil } -func (s gRPCSystemViewClient) ClusterID(ctx context.Context) (string, error) { - reply, err := s.client.ClusterInfo(ctx, &pb.Empty{}) - if err != nil { - return "", err - } - - return reply.ClusterID, nil -} - type gRPCSystemViewServer struct { pb.UnimplementedSystemViewServer @@ -379,18 +356,3 @@ func (s *gRPCSystemViewServer) GeneratePasswordFromPolicy(ctx context.Context, r } return resp, nil } - -func (s *gRPCSystemViewServer) ClusterInfo(ctx context.Context, _ *pb.Empty) (*pb.ClusterInfoReply, error) { - if s.impl == nil { - return nil, errMissingSystemView - } - - clusterId, err := s.impl.ClusterID(ctx) - if err != nil { - return &pb.ClusterInfoReply{}, status.Errorf(codes.Internal, "failed to fetch cluster id") - } - - return &pb.ClusterInfoReply{ - ClusterID: clusterId, - }, nil -} diff --git a/sdk/plugin/grpc_system_test.go b/sdk/plugin/grpc_system_test.go index 19a5ecbaaeea0..691c353068f34 100644 --- a/sdk/plugin/grpc_system_test.go +++ b/sdk/plugin/grpc_system_test.go @@ -1,6 +1,3 @@ -// 
Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( @@ -116,6 +113,10 @@ func TestSystem_GRPC_replicationState(t *testing.T) { } } +func TestSystem_GRPC_responseWrapData(t *testing.T) { + t.SkipNow() +} + func TestSystem_GRPC_lookupPlugin(t *testing.T) { sys := logical.TestSystemView() client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { diff --git a/sdk/plugin/logger.go b/sdk/plugin/logger.go index 1ef4694e9b8f4..ecf6ed01f158a 100644 --- a/sdk/plugin/logger.go +++ b/sdk/plugin/logger.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import hclog "github.com/hashicorp/go-hclog" diff --git a/sdk/plugin/logger_test.go b/sdk/plugin/logger_test.go index c47a70b1c579b..a2b8a80155cde 100644 --- a/sdk/plugin/logger_test.go +++ b/sdk/plugin/logger_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/sdk/plugin/middleware.go b/sdk/plugin/middleware.go index 4411c788297b4..546584ccc7367 100644 --- a/sdk/plugin/middleware.go +++ b/sdk/plugin/middleware.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/sdk/plugin/mock/backend.go b/sdk/plugin/mock/backend.go index 9b3aa2c851e29..a75b639ef6536 100644 --- a/sdk/plugin/mock/backend.go +++ b/sdk/plugin/mock/backend.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mock import ( diff --git a/sdk/plugin/mock/backend_test.go b/sdk/plugin/mock/backend_test.go index 640eec11643df..15860906d4a72 100644 --- a/sdk/plugin/mock/backend_test.go +++ b/sdk/plugin/mock/backend_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package mock import ( diff --git a/sdk/plugin/mock/path_errors.go b/sdk/plugin/mock/path_errors.go index f5e5b124fcb22..05ef474a7eafc 100644 --- a/sdk/plugin/mock/path_errors.go +++ b/sdk/plugin/mock/path_errors.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mock import ( diff --git a/sdk/plugin/mock/path_internal.go b/sdk/plugin/mock/path_internal.go index 30c2926f5cfad..26ede270fac21 100644 --- a/sdk/plugin/mock/path_internal.go +++ b/sdk/plugin/mock/path_internal.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mock import ( diff --git a/sdk/plugin/mock/path_kv.go b/sdk/plugin/mock/path_kv.go index fd8080572cd42..1946b57624964 100644 --- a/sdk/plugin/mock/path_kv.go +++ b/sdk/plugin/mock/path_kv.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mock import ( diff --git a/sdk/plugin/mock/path_raw.go b/sdk/plugin/mock/path_raw.go index 2a4b77fb731a2..55cb7c9374087 100644 --- a/sdk/plugin/mock/path_raw.go +++ b/sdk/plugin/mock/path_raw.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mock import ( diff --git a/sdk/plugin/mock/path_special.go b/sdk/plugin/mock/path_special.go index 4223f91053dc0..22afa41c6daed 100644 --- a/sdk/plugin/mock/path_special.go +++ b/sdk/plugin/mock/path_special.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package mock import ( diff --git a/sdk/plugin/pb/backend.pb.go b/sdk/plugin/pb/backend.pb.go index 82bbae2fd230f..8b764b5fd01da 100644 --- a/sdk/plugin/pb/backend.pb.go +++ b/sdk/plugin/pb/backend.pb.go @@ -1,10 +1,7 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v3.21.5 // source: sdk/plugin/pb/backend.proto package pb @@ -202,12 +199,6 @@ type Paths struct { // should be seal wrapped with extra encryption. It is exact matching // unless it ends with '/' in which case it will be treated as a prefix. SealWrapStorage []string `protobuf:"bytes,4,rep,name=seal_wrap_storage,json=sealWrapStorage,proto3" json:"seal_wrap_storage,omitempty"` - // WriteForwardedStorage are storage paths that, when running on a PR - // Secondary cluster, cause a GRPC call up to the PR Primary cluster's - // active node to handle storage.Put(...) and storage.Delete(...) events. - // - // See extended note in /sdk/logical/logical.go. - WriteForwardedStorage []string `protobuf:"bytes,5,rep,name=write_forwarded_storage,json=writeForwardedStorage,proto3" json:"write_forwarded_storage,omitempty"` } func (x *Paths) Reset() { @@ -270,13 +261,6 @@ func (x *Paths) GetSealWrapStorage() []string { return nil } -func (x *Paths) GetWriteForwardedStorage() []string { - if x != nil { - return x.WriteForwardedStorage - } - return nil -} - type Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -773,7 +757,6 @@ type TokenEntry struct { InternalMeta map[string]string `protobuf:"bytes,19,rep,name=internal_meta,json=internalMeta,proto3" json:"internal_meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` InlinePolicy string `protobuf:"bytes,20,opt,name=inline_policy,json=inlinePolicy,proto3" json:"inline_policy,omitempty"` NoIdentityPolicies bool `protobuf:"varint,21,opt,name=no_identity_policies,json=noIdentityPolicies,proto3" json:"no_identity_policies,omitempty"` - ExternalID string `protobuf:"bytes,22,opt,name=external_id,json=externalId,proto3" json:"external_id,omitempty"` } func (x *TokenEntry) Reset() { @@ -955,13 +938,6 @@ func (x *TokenEntry) GetNoIdentityPolicies() bool { return false } -func (x *TokenEntry) 
GetExternalID() string { - if x != nil { - return x.ExternalID - } - return "" -} - type LeaseOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3151,69 +3127,6 @@ func (x *GeneratePasswordFromPolicyReply) GetPassword() string { return "" } -type ClusterInfoReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` - ClusterID string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Err string `protobuf:"bytes,3,opt,name=err,proto3" json:"err,omitempty"` -} - -func (x *ClusterInfoReply) Reset() { - *x = ClusterInfoReply{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClusterInfoReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClusterInfoReply) ProtoMessage() {} - -func (x *ClusterInfoReply) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClusterInfoReply.ProtoReflect.Descriptor instead. 
-func (*ClusterInfoReply) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{46} -} - -func (x *ClusterInfoReply) GetClusterName() string { - if x != nil { - return x.ClusterName - } - return "" -} - -func (x *ClusterInfoReply) GetClusterID() string { - if x != nil { - return x.ClusterID - } - return "" -} - -func (x *ClusterInfoReply) GetErr() string { - if x != nil { - return x.Err - } - return "" -} - type Connection struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3231,7 +3144,7 @@ type Connection struct { func (x *Connection) Reset() { *x = Connection{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3244,7 +3157,7 @@ func (x *Connection) String() string { func (*Connection) ProtoMessage() {} func (x *Connection) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3257,7 +3170,7 @@ func (x *Connection) ProtoReflect() protoreflect.Message { // Deprecated: Use Connection.ProtoReflect.Descriptor instead. 
func (*Connection) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{47} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{46} } func (x *Connection) GetRemoteAddr() string { @@ -3303,7 +3216,7 @@ type ConnectionState struct { func (x *ConnectionState) Reset() { *x = ConnectionState{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3316,7 +3229,7 @@ func (x *ConnectionState) String() string { func (*ConnectionState) ProtoMessage() {} func (x *ConnectionState) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3329,7 +3242,7 @@ func (x *ConnectionState) ProtoReflect() protoreflect.Message { // Deprecated: Use ConnectionState.ProtoReflect.Descriptor instead. 
func (*ConnectionState) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{48} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{47} } func (x *ConnectionState) GetVersion() uint32 { @@ -3427,7 +3340,7 @@ type Certificate struct { func (x *Certificate) Reset() { *x = Certificate{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3440,7 +3353,7 @@ func (x *Certificate) String() string { func (*Certificate) ProtoMessage() {} func (x *Certificate) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3453,7 +3366,7 @@ func (x *Certificate) ProtoReflect() protoreflect.Message { // Deprecated: Use Certificate.ProtoReflect.Descriptor instead. 
func (*Certificate) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{49} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{48} } func (x *Certificate) GetAsn1Data() []byte { @@ -3474,7 +3387,7 @@ type CertificateChain struct { func (x *CertificateChain) Reset() { *x = CertificateChain{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3487,7 +3400,7 @@ func (x *CertificateChain) String() string { func (*CertificateChain) ProtoMessage() {} func (x *CertificateChain) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3500,7 +3413,7 @@ func (x *CertificateChain) ProtoReflect() protoreflect.Message { // Deprecated: Use CertificateChain.ProtoReflect.Descriptor instead. 
func (*CertificateChain) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{50} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{49} } func (x *CertificateChain) GetCertificates() []*Certificate { @@ -3510,61 +3423,6 @@ func (x *CertificateChain) GetCertificates() []*Certificate { return nil } -type SendEventRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EventType string `protobuf:"bytes,1,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` - Event *logical.EventData `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"` -} - -func (x *SendEventRequest) Reset() { - *x = SendEventRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SendEventRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendEventRequest) ProtoMessage() {} - -func (x *SendEventRequest) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendEventRequest.ProtoReflect.Descriptor instead. 
-func (*SendEventRequest) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{51} -} - -func (x *SendEventRequest) GetEventType() string { - if x != nil { - return x.EventType - } - return "" -} - -func (x *SendEventRequest) GetEvent() *logical.EventData { - if x != nil { - return x.Event - } - return nil -} - var File_sdk_plugin_pb_backend_proto protoreflect.FileDescriptor var file_sdk_plugin_pb_backend_proto_rawDesc = []byte{ @@ -3572,545 +3430,519 @@ var file_sdk_plugin_pb_backend_proto_rawDesc = []byte{ 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x17, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x73, 0x64, 0x6b, - 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, - 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x20, 0x0a, 0x06, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x5b, 0x0a, 0x0a, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x72, - 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x65, 0x72, - 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x5f, 0x6d, 0x73, 0x67, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x4d, 
0x73, 0x67, 0x12, 0x19, - 0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x22, 0xce, 0x01, 0x0a, 0x05, 0x50, 0x61, - 0x74, 0x68, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, - 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, - 0x72, 0x61, 0x70, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0f, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x15, 0x77, 0x72, 0x69, 0x74, 0x65, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, - 0x64, 0x65, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0xbf, 0x06, 0x0a, 0x07, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 
0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x06, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, - 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x32, - 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, - 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 
0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, - 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x1b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, - 0x75, 0x73, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, - 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, - 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6f, 0x76, 0x65, 0x72, - 0x72, 0x69, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x75, 0x6e, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x13, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 
0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, + 0x74, 0x6f, 0x1a, 0x1a, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, + 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x20, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x22, 0x5b, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x07, 0x65, 0x72, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, + 0x65, 0x72, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, + 0x72, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, + 0x22, 0x96, 0x01, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, + 0x6f, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x28, + 0x0a, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0c, 0x6c, 0x6f, 0x63, 
0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, + 0x11, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, + 0x61, 0x70, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0xbf, 0x06, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x06, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, + 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, + 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x32, 0x0a, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 
0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, + 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x1b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x75, + 0x73, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x55, + 
0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x75, 0x6e, 0x61, + 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x05, 0x0a, 0x04, + 0x41, 0x75, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, + 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, + 0x32, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, + 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, 0x75, + 0x6d, 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x49, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x33, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 
0x03, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x25, + 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x6d, + 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x78, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6e, + 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 
0x38, 0x01, 0x22, 0xa9, 0x06, 0x0a, 0x0a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, + 0x75, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x55, + 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x78, 
0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, 0x61, + 0x78, 0x54, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0f, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, + 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, 0x6f, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, 0x6f, + 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, + 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x6e, 0x6f, 0x5f, 0x69, 0x64, 0x65, 
0x6e, 0x74, + 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x6e, 0x6f, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x3f, 0x0a, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x05, 0x0a, - 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, - 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, - 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, - 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x18, 0x04, 0x20, 0x03, 
0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x12, 0x32, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, - 0x75, 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, - 0x75, 0x6d, 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x33, 0x0a, 0x0d, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, - 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 
0x43, 0x69, 0x64, 0x72, 0x73, 0x12, - 0x25, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, - 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, - 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x1d, 0x0a, - 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, - 0x6e, 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xca, 0x06, 0x0a, 0x0a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 
0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, - 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, - 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, - 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, 0x75, 0x6d, - 0x55, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x65, - 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, - 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 
0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0f, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, 0x6f, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, - 0x6f, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x6e, 0x6f, 0x5f, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x12, 0x6e, 0x6f, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x0a, 0x65, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, - 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0xaf, 0x01, 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, + 0x54, 0x54, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x62, 0x6c, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x62, 0x6c, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, + 0x39, 0x0a, 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x61, + 0x78, 0x54, 0x54, 0x4c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x61, 0x78, 0x54, + 0x54, 0x4c, 0x22, 0x7f, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x35, 0x0a, 0x0d, + 0x6c, 0x65, 
0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x49, 0x64, 0x22, 0xc8, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x22, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, 0x61, 0x75, + 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x31, + 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, + 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x33, 0x0a, 0x07, 0x68, 0x65, 0x61, 
0x64, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc8, + 0x02, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 
0x5f, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, 0x0a, 0x09, + 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x58, 0x0a, 0x0f, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, + 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x16, + 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, + 0x72, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, + 0x72, 0x61, 0x70, 0x22, 0x59, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x60, + 0x0a, 0x12, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x65, 0x70, 0x6c, 0x79, 
0x12, 0x28, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, + 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, + 0x22, 0x10, 0x0a, 0x0e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, + 0x67, 0x73, 0x22, 0x33, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x34, 0x0a, 0x11, 0x53, 0x70, 0x65, 0x63, 0x69, + 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x05, + 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, + 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x22, 0x60, 0x0a, + 0x18, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x76, 0x0a, 0x19, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 
0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x0b, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0xb8, 0x01, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x75, + 0x70, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, + 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, + 0x55, 0x55, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xaf, 0x01, 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x62, - 0x6c, 
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, - 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x4d, 0x61, 0x78, 0x54, 0x54, 0x4c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x61, - 0x78, 0x54, 0x54, 0x4c, 0x22, 0x7f, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x35, - 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x65, - 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x65, - 0x61, 0x73, 0x65, 0x49, 0x64, 0x22, 0xc8, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 
0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, - 0x61, 0x75, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, - 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0xc8, 0x02, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, - 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 
0x12, 0x1a, 0x0a, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x77, 0x72, - 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, - 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, - 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x58, 0x0a, 0x0f, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, - 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, - 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x66, 
0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, - 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, - 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x59, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x60, 0x0a, 0x12, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, - 0x72, 0x72, 0x22, 0x10, 0x0a, 0x0e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x41, 0x72, 0x67, 0x73, 0x22, 0x33, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x34, 0x0a, 0x11, 0x53, 0x70, 0x65, - 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, - 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, - 0x70, 0x62, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x22, - 0x60, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x76, 0x0a, 0x19, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, - 0x0a, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, - 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0xb8, 0x01, 0x0a, 0x09, 0x53, 0x65, - 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x6f, 0x6b, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x62, 0x72, 0x6f, 0x6b, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, - 0x72, 0x67, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 
0x52, - 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x65, - 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, - 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1e, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, + 0x38, 0x01, 0x22, 0x1e, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x1f, 0x0a, 0x09, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x22, 0x25, 0x0a, 0x11, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x53, 0x0a, 0x0c, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, + 0x29, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, + 0x67, 0x73, 0x12, 0x16, 0x0a, 
0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x38, 0x0a, 0x10, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x12, + 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, + 0x79, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x65, 0x72, 0x72, 0x22, 0x22, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, + 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4b, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x38, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, + 0x23, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x65, 0x72, 0x72, 0x22, 0x1f, 0x0a, 0x09, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x25, 0x0a, 0x11, 0x49, 0x6e, 0x76, 
0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x53, 0x0a, 0x0c, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, - 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, - 0x70, 0x22, 0x29, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, - 0x41, 0x72, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x38, 0x0a, 0x10, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, - 0x6b, 0x65, 0x79, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x22, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x47, 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4b, 0x0a, 0x0f, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, - 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 
0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x38, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, 0x74, - 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x22, 0x23, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x25, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x26, 0x0a, - 0x12, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1c, 0x0a, 0x08, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, - 0x54, 0x54, 0x4c, 0x22, 0x28, 0x0a, 0x0c, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x22, 0x32, 0x0a, - 0x14, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x22, 0x2d, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 
0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x22, 0x4e, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, - 0x44, 0x61, 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, - 0x54, 0x54, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x10, - 0x0a, 0x03, 0x4a, 0x57, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x4a, 0x57, 0x54, - 0x22, 0x5c, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, - 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, - 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, - 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x2d, - 0x0a, 0x11, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x27, 0x0a, - 0x0f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x22, 0x2d, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 
0x6e, 0x74, - 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0f, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x27, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, - 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x65, 0x72, 0x72, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, - 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x67, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, - 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x06, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x6d, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, - 0x6e, 0x76, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, - 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x65, 0x72, 0x72, 0x22, 0x44, 0x0a, 0x21, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x5f, 
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1f, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, - 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x66, 0x0a, 0x10, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, - 0x72, 0x22, 0x8e, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, - 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, - 0x72, 0x74, 0x12, 0x3e, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, - 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 
0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x22, 0xbb, 0x04, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x68, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x64, 0x69, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x69, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, - 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x69, 0x73, 0x5f, 0x6d, 0x75, 0x74, - 0x75, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x6e, 0x65, 0x67, 0x6f, 0x74, - 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x49, 0x73, 0x4d, - 0x75, 0x74, 0x75, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, - 
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x10, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x76, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x1d, 0x73, 0x69, 0x67, 0x6e, - 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x1b, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, - 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x74, 0x6c, 0x73, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, - 0x22, 0x2a, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, - 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x6e, 0x31, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x08, 0x61, 0x73, 0x6e, 0x31, 0x44, 0x61, 0x74, 0x61, 0x22, 0x47, 0x0a, 0x10, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x12, 0x33, 0x0a, 0x0c, 0x63, 0x65, 
0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x5b, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, - 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x05, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x32, 0xa5, 0x03, 0x0a, 0x07, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x3e, - 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, - 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, - 0x0a, 0x0c, 0x53, 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x09, - 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, - 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x53, 0x0a, 0x14, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x1d, 0x2e, 0x70, 0x62, 0x2e, 
0x48, 0x61, 0x6e, 0x64, - 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, - 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x09, 0x2e, 0x70, 0x62, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x31, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x09, - 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x53, 0x65, 0x74, - 0x75, 0x70, 0x12, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, - 0x73, 0x1a, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, - 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, - 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x62, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0xd5, 0x01, 0x0a, 0x07, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x31, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x13, - 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x41, - 0x72, 0x67, 0x73, 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x47, 0x65, 0x74, - 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, - 0x41, 0x72, 
0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x50, 0x75, 0x74, - 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, - 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x50, 0x75, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x37, 0x0a, 0x06, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x32, 0xe1, 0x05, 0x0a, 0x0a, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x69, 0x65, - 0x77, 0x12, 0x2a, 0x0a, 0x0f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61, 0x73, - 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, - 0x0b, 0x4d, 0x61, 0x78, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, - 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x07, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, - 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x10, 0x2e, 0x70, 0x62, - 0x2e, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, - 0x0f, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x70, 0x62, - 0x2e, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x38, 0x0a, 
0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x19, 0x2e, - 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, - 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, 0x0a, 0x0c, 0x4d, 0x6c, 0x6f, 0x63, - 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2c, 0x0a, 0x0a, 0x4c, 0x6f, - 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, - 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, - 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x2a, 0x0a, 0x09, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x12, 0x09, 0x2e, 0x70, - 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 
0x0f, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, - 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, - 0x67, 0x73, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, - 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x68, 0x0a, 0x1a, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x25, 0x2e, 0x70, 0x62, 0x2e, + 0x03, 0x65, 0x72, 0x72, 0x22, 0x25, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x26, 0x0a, 0x12, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x65, 0x72, 0x72, 0x22, 0x1c, 0x0a, 0x08, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, + 0x4c, 0x22, 0x28, 0x0a, 0x0c, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x22, 0x32, 0x0a, 0x14, 0x43, + 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, + 0x2d, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 
0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x4e, + 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, + 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, + 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x10, 0x0a, 0x03, + 0x4a, 0x57, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x4a, 0x57, 0x54, 0x22, 0x5c, + 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x2d, 0x0a, 0x11, + 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x27, 0x0a, 0x0f, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x22, 0x2d, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, + 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0f, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x27, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, + 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, + 0x72, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x06, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x65, 0x72, 0x72, 0x22, 0x6d, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, + 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x11, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x44, 0x0a, 0x21, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 
0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1f, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x8e, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3e, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0xbb, 0x04, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x11, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 
0x1d, 0x0a, 0x0a, 0x64, 0x69, 0x64, 0x5f, 0x72, 0x65, 0x73, + 0x75, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x69, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x6e, 0x65, 0x67, 0x6f, 0x74, + 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x6e, 0x65, 0x67, 0x6f, + 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, + 0x69, 0x73, 0x5f, 0x6d, 0x75, 0x74, 0x75, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x1a, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x49, 0x73, 0x4d, 0x75, 0x74, 0x75, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x11, + 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x10, 0x70, + 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, + 0x3d, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 
0x69, 0x6e, 0x52, 0x0e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x42, + 0x0a, 0x1d, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x75, + 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x74, 0x6c, 0x73, + 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x22, 0x2a, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x6e, 0x31, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x73, 0x6e, 0x31, 0x44, 0x61, + 0x74, 0x61, 0x22, 0x47, 0x0a, 0x10, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x33, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, + 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x32, 0xa5, 0x03, 0x0a, 0x07, + 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x3e, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, + 0x16, 0x2e, 
0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, 0x0a, 0x0c, 0x53, 0x70, 0x65, 0x63, 0x69, + 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, + 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x53, 0x0a, 0x14, 0x48, 0x61, 0x6e, + 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x12, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x1a, + 0x1d, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, + 0x0a, 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x31, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x53, 0x65, 0x74, 0x75, 0x70, 0x12, 0x0d, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x0e, 0x2e, 0x70, 0x62, 0x2e, + 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x49, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, + 0x62, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 
0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x20, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x32, 0xd5, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, + 0x31, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x14, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, + 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, + 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x37, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, + 0x72, 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0xb1, 0x05, 0x0a, 0x0a, + 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x69, 0x65, 0x77, 0x12, 0x2a, 0x0a, 0x0f, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, + 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, + 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x0b, 0x4d, 0x61, 0x78, 0x4c, 
0x65, 0x61, + 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, + 0x0a, 0x07, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, + 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, + 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x38, + 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, + 0x70, 0x62, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x2e, 0x70, + 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, + 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x30, 0x0a, 0x0c, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, + 0x62, 0x2e, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x2c, 0x0a, 0x0a, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x09, 0x2e, 
0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x70, + 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, + 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x09, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, + 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x18, 0x2e, 0x70, 0x62, + 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x68, 0x0a, 0x1a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x25, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 
0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0x36, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x2c, 0x0a, 0x09, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, - 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x2a, - 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, - 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, + 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, + 0x6b, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -4125,7 +3957,7 @@ func file_sdk_plugin_pb_backend_proto_rawDescGZIP() []byte { return file_sdk_plugin_pb_backend_proto_rawDescData } -var file_sdk_plugin_pb_backend_proto_msgTypes = make([]protoimpl.MessageInfo, 58) +var file_sdk_plugin_pb_backend_proto_msgTypes = make([]protoimpl.MessageInfo, 56) var file_sdk_plugin_pb_backend_proto_goTypes = []interface{}{ (*Empty)(nil), // 0: pb.Empty (*Header)(nil), // 1: pb.Header @@ -4173,44 +4005,41 @@ var file_sdk_plugin_pb_backend_proto_goTypes = []interface{}{ (*PluginEnvReply)(nil), // 43: 
pb.PluginEnvReply (*GeneratePasswordFromPolicyRequest)(nil), // 44: pb.GeneratePasswordFromPolicyRequest (*GeneratePasswordFromPolicyReply)(nil), // 45: pb.GeneratePasswordFromPolicyReply - (*ClusterInfoReply)(nil), // 46: pb.ClusterInfoReply - (*Connection)(nil), // 47: pb.Connection - (*ConnectionState)(nil), // 48: pb.ConnectionState - (*Certificate)(nil), // 49: pb.Certificate - (*CertificateChain)(nil), // 50: pb.CertificateChain - (*SendEventRequest)(nil), // 51: pb.SendEventRequest - nil, // 52: pb.Request.HeadersEntry - nil, // 53: pb.Auth.MetadataEntry - nil, // 54: pb.TokenEntry.MetaEntry - nil, // 55: pb.TokenEntry.InternalMetaEntry - nil, // 56: pb.Response.HeadersEntry - nil, // 57: pb.SetupArgs.ConfigEntry - (*logical.Alias)(nil), // 58: logical.Alias - (*timestamppb.Timestamp)(nil), // 59: google.protobuf.Timestamp - (*logical.Entity)(nil), // 60: logical.Entity - (*logical.Group)(nil), // 61: logical.Group - (*logical.PluginEnvironment)(nil), // 62: logical.PluginEnvironment - (*logical.EventData)(nil), // 63: logical.EventData + (*Connection)(nil), // 46: pb.Connection + (*ConnectionState)(nil), // 47: pb.ConnectionState + (*Certificate)(nil), // 48: pb.Certificate + (*CertificateChain)(nil), // 49: pb.CertificateChain + nil, // 50: pb.Request.HeadersEntry + nil, // 51: pb.Auth.MetadataEntry + nil, // 52: pb.TokenEntry.MetaEntry + nil, // 53: pb.TokenEntry.InternalMetaEntry + nil, // 54: pb.Response.HeadersEntry + nil, // 55: pb.SetupArgs.ConfigEntry + (*logical.Alias)(nil), // 56: logical.Alias + (*timestamppb.Timestamp)(nil), // 57: google.protobuf.Timestamp + (*logical.Entity)(nil), // 58: logical.Entity + (*logical.Group)(nil), // 59: logical.Group + (*logical.PluginEnvironment)(nil), // 60: logical.PluginEnvironment } var file_sdk_plugin_pb_backend_proto_depIDxs = []int32{ 8, // 0: pb.Request.secret:type_name -> pb.Secret 5, // 1: pb.Request.auth:type_name -> pb.Auth - 52, // 2: pb.Request.headers:type_name -> pb.Request.HeadersEntry + 50, // 
2: pb.Request.headers:type_name -> pb.Request.HeadersEntry 11, // 3: pb.Request.wrap_info:type_name -> pb.RequestWrapInfo - 47, // 4: pb.Request.connection:type_name -> pb.Connection + 46, // 4: pb.Request.connection:type_name -> pb.Connection 7, // 5: pb.Auth.lease_options:type_name -> pb.LeaseOptions - 53, // 6: pb.Auth.metadata:type_name -> pb.Auth.MetadataEntry - 58, // 7: pb.Auth.alias:type_name -> logical.Alias - 58, // 8: pb.Auth.group_aliases:type_name -> logical.Alias - 54, // 9: pb.TokenEntry.meta:type_name -> pb.TokenEntry.MetaEntry - 55, // 10: pb.TokenEntry.internal_meta:type_name -> pb.TokenEntry.InternalMetaEntry - 59, // 11: pb.LeaseOptions.issue_time:type_name -> google.protobuf.Timestamp + 51, // 6: pb.Auth.metadata:type_name -> pb.Auth.MetadataEntry + 56, // 7: pb.Auth.alias:type_name -> logical.Alias + 56, // 8: pb.Auth.group_aliases:type_name -> logical.Alias + 52, // 9: pb.TokenEntry.meta:type_name -> pb.TokenEntry.MetaEntry + 53, // 10: pb.TokenEntry.internal_meta:type_name -> pb.TokenEntry.InternalMetaEntry + 57, // 11: pb.LeaseOptions.issue_time:type_name -> google.protobuf.Timestamp 7, // 12: pb.Secret.lease_options:type_name -> pb.LeaseOptions 8, // 13: pb.Response.secret:type_name -> pb.Secret 5, // 14: pb.Response.auth:type_name -> pb.Auth 10, // 15: pb.Response.wrap_info:type_name -> pb.ResponseWrapInfo - 56, // 16: pb.Response.headers:type_name -> pb.Response.HeadersEntry - 59, // 17: pb.ResponseWrapInfo.creation_time:type_name -> google.protobuf.Timestamp + 54, // 16: pb.Response.headers:type_name -> pb.Response.HeadersEntry + 57, // 17: pb.ResponseWrapInfo.creation_time:type_name -> google.protobuf.Timestamp 4, // 18: pb.HandleRequestArgs.request:type_name -> pb.Request 9, // 19: pb.HandleRequestReply.response:type_name -> pb.Response 2, // 20: pb.HandleRequestReply.err:type_name -> pb.ProtoError @@ -4218,77 +4047,72 @@ var file_sdk_plugin_pb_backend_proto_depIDxs = []int32{ 3, // 22: pb.SpecialPathsReply.paths:type_name -> pb.Paths 
4, // 23: pb.HandleExistenceCheckArgs.request:type_name -> pb.Request 2, // 24: pb.HandleExistenceCheckReply.err:type_name -> pb.ProtoError - 57, // 25: pb.SetupArgs.Config:type_name -> pb.SetupArgs.ConfigEntry + 55, // 25: pb.SetupArgs.Config:type_name -> pb.SetupArgs.ConfigEntry 23, // 26: pb.StorageGetReply.entry:type_name -> pb.StorageEntry 23, // 27: pb.StoragePutArgs.entry:type_name -> pb.StorageEntry 10, // 28: pb.ResponseWrapDataReply.wrap_info:type_name -> pb.ResponseWrapInfo - 60, // 29: pb.EntityInfoReply.entity:type_name -> logical.Entity - 61, // 30: pb.GroupsForEntityReply.groups:type_name -> logical.Group - 62, // 31: pb.PluginEnvReply.plugin_environment:type_name -> logical.PluginEnvironment - 48, // 32: pb.Connection.connection_state:type_name -> pb.ConnectionState - 50, // 33: pb.ConnectionState.peer_certificates:type_name -> pb.CertificateChain - 50, // 34: pb.ConnectionState.verified_chains:type_name -> pb.CertificateChain - 49, // 35: pb.CertificateChain.certificates:type_name -> pb.Certificate - 63, // 36: pb.SendEventRequest.event:type_name -> logical.EventData - 1, // 37: pb.Request.HeadersEntry.value:type_name -> pb.Header - 1, // 38: pb.Response.HeadersEntry.value:type_name -> pb.Header - 12, // 39: pb.Backend.HandleRequest:input_type -> pb.HandleRequestArgs - 0, // 40: pb.Backend.SpecialPaths:input_type -> pb.Empty - 17, // 41: pb.Backend.HandleExistenceCheck:input_type -> pb.HandleExistenceCheckArgs - 0, // 42: pb.Backend.Cleanup:input_type -> pb.Empty - 22, // 43: pb.Backend.InvalidateKey:input_type -> pb.InvalidateKeyArgs - 19, // 44: pb.Backend.Setup:input_type -> pb.SetupArgs - 14, // 45: pb.Backend.Initialize:input_type -> pb.InitializeArgs - 0, // 46: pb.Backend.Type:input_type -> pb.Empty - 24, // 47: pb.Storage.List:input_type -> pb.StorageListArgs - 26, // 48: pb.Storage.Get:input_type -> pb.StorageGetArgs - 28, // 49: pb.Storage.Put:input_type -> pb.StoragePutArgs - 30, // 50: pb.Storage.Delete:input_type -> 
pb.StorageDeleteArgs - 0, // 51: pb.SystemView.DefaultLeaseTTL:input_type -> pb.Empty - 0, // 52: pb.SystemView.MaxLeaseTTL:input_type -> pb.Empty - 0, // 53: pb.SystemView.Tainted:input_type -> pb.Empty - 0, // 54: pb.SystemView.CachingDisabled:input_type -> pb.Empty - 0, // 55: pb.SystemView.ReplicationState:input_type -> pb.Empty - 36, // 56: pb.SystemView.ResponseWrapData:input_type -> pb.ResponseWrapDataArgs - 0, // 57: pb.SystemView.MlockEnabled:input_type -> pb.Empty - 0, // 58: pb.SystemView.LocalMount:input_type -> pb.Empty - 40, // 59: pb.SystemView.EntityInfo:input_type -> pb.EntityInfoArgs - 0, // 60: pb.SystemView.PluginEnv:input_type -> pb.Empty - 40, // 61: pb.SystemView.GroupsForEntity:input_type -> pb.EntityInfoArgs - 44, // 62: pb.SystemView.GeneratePasswordFromPolicy:input_type -> pb.GeneratePasswordFromPolicyRequest - 0, // 63: pb.SystemView.ClusterInfo:input_type -> pb.Empty - 51, // 64: pb.Events.SendEvent:input_type -> pb.SendEventRequest - 13, // 65: pb.Backend.HandleRequest:output_type -> pb.HandleRequestReply - 16, // 66: pb.Backend.SpecialPaths:output_type -> pb.SpecialPathsReply - 18, // 67: pb.Backend.HandleExistenceCheck:output_type -> pb.HandleExistenceCheckReply - 0, // 68: pb.Backend.Cleanup:output_type -> pb.Empty - 0, // 69: pb.Backend.InvalidateKey:output_type -> pb.Empty - 20, // 70: pb.Backend.Setup:output_type -> pb.SetupReply - 15, // 71: pb.Backend.Initialize:output_type -> pb.InitializeReply - 21, // 72: pb.Backend.Type:output_type -> pb.TypeReply - 25, // 73: pb.Storage.List:output_type -> pb.StorageListReply - 27, // 74: pb.Storage.Get:output_type -> pb.StorageGetReply - 29, // 75: pb.Storage.Put:output_type -> pb.StoragePutReply - 31, // 76: pb.Storage.Delete:output_type -> pb.StorageDeleteReply - 32, // 77: pb.SystemView.DefaultLeaseTTL:output_type -> pb.TTLReply - 32, // 78: pb.SystemView.MaxLeaseTTL:output_type -> pb.TTLReply - 33, // 79: pb.SystemView.Tainted:output_type -> pb.TaintedReply - 34, // 80: 
pb.SystemView.CachingDisabled:output_type -> pb.CachingDisabledReply - 35, // 81: pb.SystemView.ReplicationState:output_type -> pb.ReplicationStateReply - 37, // 82: pb.SystemView.ResponseWrapData:output_type -> pb.ResponseWrapDataReply - 38, // 83: pb.SystemView.MlockEnabled:output_type -> pb.MlockEnabledReply - 39, // 84: pb.SystemView.LocalMount:output_type -> pb.LocalMountReply - 41, // 85: pb.SystemView.EntityInfo:output_type -> pb.EntityInfoReply - 43, // 86: pb.SystemView.PluginEnv:output_type -> pb.PluginEnvReply - 42, // 87: pb.SystemView.GroupsForEntity:output_type -> pb.GroupsForEntityReply - 45, // 88: pb.SystemView.GeneratePasswordFromPolicy:output_type -> pb.GeneratePasswordFromPolicyReply - 46, // 89: pb.SystemView.ClusterInfo:output_type -> pb.ClusterInfoReply - 0, // 90: pb.Events.SendEvent:output_type -> pb.Empty - 65, // [65:91] is the sub-list for method output_type - 39, // [39:65] is the sub-list for method input_type - 39, // [39:39] is the sub-list for extension type_name - 39, // [39:39] is the sub-list for extension extendee - 0, // [0:39] is the sub-list for field type_name + 58, // 29: pb.EntityInfoReply.entity:type_name -> logical.Entity + 59, // 30: pb.GroupsForEntityReply.groups:type_name -> logical.Group + 60, // 31: pb.PluginEnvReply.plugin_environment:type_name -> logical.PluginEnvironment + 47, // 32: pb.Connection.connection_state:type_name -> pb.ConnectionState + 49, // 33: pb.ConnectionState.peer_certificates:type_name -> pb.CertificateChain + 49, // 34: pb.ConnectionState.verified_chains:type_name -> pb.CertificateChain + 48, // 35: pb.CertificateChain.certificates:type_name -> pb.Certificate + 1, // 36: pb.Request.HeadersEntry.value:type_name -> pb.Header + 1, // 37: pb.Response.HeadersEntry.value:type_name -> pb.Header + 12, // 38: pb.Backend.HandleRequest:input_type -> pb.HandleRequestArgs + 0, // 39: pb.Backend.SpecialPaths:input_type -> pb.Empty + 17, // 40: pb.Backend.HandleExistenceCheck:input_type -> 
pb.HandleExistenceCheckArgs + 0, // 41: pb.Backend.Cleanup:input_type -> pb.Empty + 22, // 42: pb.Backend.InvalidateKey:input_type -> pb.InvalidateKeyArgs + 19, // 43: pb.Backend.Setup:input_type -> pb.SetupArgs + 14, // 44: pb.Backend.Initialize:input_type -> pb.InitializeArgs + 0, // 45: pb.Backend.Type:input_type -> pb.Empty + 24, // 46: pb.Storage.List:input_type -> pb.StorageListArgs + 26, // 47: pb.Storage.Get:input_type -> pb.StorageGetArgs + 28, // 48: pb.Storage.Put:input_type -> pb.StoragePutArgs + 30, // 49: pb.Storage.Delete:input_type -> pb.StorageDeleteArgs + 0, // 50: pb.SystemView.DefaultLeaseTTL:input_type -> pb.Empty + 0, // 51: pb.SystemView.MaxLeaseTTL:input_type -> pb.Empty + 0, // 52: pb.SystemView.Tainted:input_type -> pb.Empty + 0, // 53: pb.SystemView.CachingDisabled:input_type -> pb.Empty + 0, // 54: pb.SystemView.ReplicationState:input_type -> pb.Empty + 36, // 55: pb.SystemView.ResponseWrapData:input_type -> pb.ResponseWrapDataArgs + 0, // 56: pb.SystemView.MlockEnabled:input_type -> pb.Empty + 0, // 57: pb.SystemView.LocalMount:input_type -> pb.Empty + 40, // 58: pb.SystemView.EntityInfo:input_type -> pb.EntityInfoArgs + 0, // 59: pb.SystemView.PluginEnv:input_type -> pb.Empty + 40, // 60: pb.SystemView.GroupsForEntity:input_type -> pb.EntityInfoArgs + 44, // 61: pb.SystemView.GeneratePasswordFromPolicy:input_type -> pb.GeneratePasswordFromPolicyRequest + 13, // 62: pb.Backend.HandleRequest:output_type -> pb.HandleRequestReply + 16, // 63: pb.Backend.SpecialPaths:output_type -> pb.SpecialPathsReply + 18, // 64: pb.Backend.HandleExistenceCheck:output_type -> pb.HandleExistenceCheckReply + 0, // 65: pb.Backend.Cleanup:output_type -> pb.Empty + 0, // 66: pb.Backend.InvalidateKey:output_type -> pb.Empty + 20, // 67: pb.Backend.Setup:output_type -> pb.SetupReply + 15, // 68: pb.Backend.Initialize:output_type -> pb.InitializeReply + 21, // 69: pb.Backend.Type:output_type -> pb.TypeReply + 25, // 70: pb.Storage.List:output_type -> 
pb.StorageListReply + 27, // 71: pb.Storage.Get:output_type -> pb.StorageGetReply + 29, // 72: pb.Storage.Put:output_type -> pb.StoragePutReply + 31, // 73: pb.Storage.Delete:output_type -> pb.StorageDeleteReply + 32, // 74: pb.SystemView.DefaultLeaseTTL:output_type -> pb.TTLReply + 32, // 75: pb.SystemView.MaxLeaseTTL:output_type -> pb.TTLReply + 33, // 76: pb.SystemView.Tainted:output_type -> pb.TaintedReply + 34, // 77: pb.SystemView.CachingDisabled:output_type -> pb.CachingDisabledReply + 35, // 78: pb.SystemView.ReplicationState:output_type -> pb.ReplicationStateReply + 37, // 79: pb.SystemView.ResponseWrapData:output_type -> pb.ResponseWrapDataReply + 38, // 80: pb.SystemView.MlockEnabled:output_type -> pb.MlockEnabledReply + 39, // 81: pb.SystemView.LocalMount:output_type -> pb.LocalMountReply + 41, // 82: pb.SystemView.EntityInfo:output_type -> pb.EntityInfoReply + 43, // 83: pb.SystemView.PluginEnv:output_type -> pb.PluginEnvReply + 42, // 84: pb.SystemView.GroupsForEntity:output_type -> pb.GroupsForEntityReply + 45, // 85: pb.SystemView.GeneratePasswordFromPolicy:output_type -> pb.GeneratePasswordFromPolicyReply + 62, // [62:86] is the sub-list for method output_type + 38, // [38:62] is the sub-list for method input_type + 38, // [38:38] is the sub-list for extension type_name + 38, // [38:38] is the sub-list for extension extendee + 0, // [0:38] is the sub-list for field type_name } func init() { file_sdk_plugin_pb_backend_proto_init() } @@ -4850,18 +4674,6 @@ func file_sdk_plugin_pb_backend_proto_init() { } } file_sdk_plugin_pb_backend_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClusterInfoReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_plugin_pb_backend_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Connection); i { case 0: return &v.state @@ -4873,7 +4685,7 @@ func 
file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ConnectionState); i { case 0: return &v.state @@ -4885,7 +4697,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Certificate); i { case 0: return &v.state @@ -4897,7 +4709,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CertificateChain); i { case 0: return &v.state @@ -4909,18 +4721,6 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendEventRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } } type x struct{} out := protoimpl.TypeBuilder{ @@ -4928,9 +4728,9 @@ func file_sdk_plugin_pb_backend_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_sdk_plugin_pb_backend_proto_rawDesc, NumEnums: 0, - NumMessages: 58, + NumMessages: 56, NumExtensions: 0, - NumServices: 4, + NumServices: 3, }, GoTypes: file_sdk_plugin_pb_backend_proto_goTypes, DependencyIndexes: file_sdk_plugin_pb_backend_proto_depIDxs, diff --git a/sdk/plugin/pb/backend.proto b/sdk/plugin/pb/backend.proto index ded407788a72c..19a56be639e46 100644 --- a/sdk/plugin/pb/backend.proto +++ b/sdk/plugin/pb/backend.proto @@ -1,13 +1,9 @@ 
-// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - syntax = "proto3"; package pb; option go_package = "github.com/hashicorp/vault/sdk/plugin/pb"; import "google/protobuf/timestamp.proto"; -import "sdk/logical/event.proto"; import "sdk/logical/identity.proto"; import "sdk/logical/plugin.proto"; @@ -51,13 +47,6 @@ message Paths { // should be seal wrapped with extra encryption. It is exact matching // unless it ends with '/' in which case it will be treated as a prefix. repeated string seal_wrap_storage = 4; - - // WriteForwardedStorage are storage paths that, when running on a PR - // Secondary cluster, cause a GRPC call up to the PR Primary cluster's - // active node to handle storage.Put(...) and storage.Delete(...) events. - // - // See extended note in /sdk/logical/logical.go. - repeated string write_forwarded_storage = 5; } message Request { @@ -249,7 +238,6 @@ message TokenEntry { map internal_meta = 19; string inline_policy = 20; bool no_identity_policies = 21; - string external_id = 22; } message LeaseOptions { @@ -578,12 +566,6 @@ message GeneratePasswordFromPolicyReply { string password = 1; } -message ClusterInfoReply { - string cluster_name = 1; - string cluster_id = 2; - string err = 3; -} - // SystemView exposes system configuration information in a safe way for plugins // to consume. Plugins should implement the client for this service. service SystemView { @@ -636,9 +618,6 @@ service SystemView { // GeneratePasswordFromPolicy generates a password from an existing password policy rpc GeneratePasswordFromPolicy(GeneratePasswordFromPolicyRequest) returns (GeneratePasswordFromPolicyReply); - - // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. 
- rpc ClusterInfo(Empty) returns (ClusterInfoReply); } message Connection { @@ -678,11 +657,3 @@ message CertificateChain { repeated Certificate certificates = 1; } -message SendEventRequest { - string event_type = 1; - logical.EventData event = 2; -} - -service Events { - rpc SendEvent(SendEventRequest) returns (Empty); -} \ No newline at end of file diff --git a/sdk/plugin/pb/backend_grpc.pb.go b/sdk/plugin/pb/backend_grpc.pb.go index a8f3107e04074..9be6bacdc5d66 100644 --- a/sdk/plugin/pb/backend_grpc.pb.go +++ b/sdk/plugin/pb/backend_grpc.pb.go @@ -642,8 +642,6 @@ type SystemViewClient interface { GroupsForEntity(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*GroupsForEntityReply, error) // GeneratePasswordFromPolicy generates a password from an existing password policy GeneratePasswordFromPolicy(ctx context.Context, in *GeneratePasswordFromPolicyRequest, opts ...grpc.CallOption) (*GeneratePasswordFromPolicyReply, error) - // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. - ClusterInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ClusterInfoReply, error) } type systemViewClient struct { @@ -762,15 +760,6 @@ func (c *systemViewClient) GeneratePasswordFromPolicy(ctx context.Context, in *G return out, nil } -func (c *systemViewClient) ClusterInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ClusterInfoReply, error) { - out := new(ClusterInfoReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/ClusterInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - // SystemViewServer is the server API for SystemView service. 
// All implementations must embed UnimplementedSystemViewServer // for forward compatibility @@ -813,8 +802,6 @@ type SystemViewServer interface { GroupsForEntity(context.Context, *EntityInfoArgs) (*GroupsForEntityReply, error) // GeneratePasswordFromPolicy generates a password from an existing password policy GeneratePasswordFromPolicy(context.Context, *GeneratePasswordFromPolicyRequest) (*GeneratePasswordFromPolicyReply, error) - // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. - ClusterInfo(context.Context, *Empty) (*ClusterInfoReply, error) mustEmbedUnimplementedSystemViewServer() } @@ -858,9 +845,6 @@ func (UnimplementedSystemViewServer) GroupsForEntity(context.Context, *EntityInf func (UnimplementedSystemViewServer) GeneratePasswordFromPolicy(context.Context, *GeneratePasswordFromPolicyRequest) (*GeneratePasswordFromPolicyReply, error) { return nil, status.Errorf(codes.Unimplemented, "method GeneratePasswordFromPolicy not implemented") } -func (UnimplementedSystemViewServer) ClusterInfo(context.Context, *Empty) (*ClusterInfoReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") -} func (UnimplementedSystemViewServer) mustEmbedUnimplementedSystemViewServer() {} // UnsafeSystemViewServer may be embedded to opt out of forward compatibility for this service. 
@@ -1090,24 +1074,6 @@ func _SystemView_GeneratePasswordFromPolicy_Handler(srv interface{}, ctx context return interceptor(ctx, in, info, handler) } -func _SystemView_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SystemViewServer).ClusterInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pb.SystemView/ClusterInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SystemViewServer).ClusterInfo(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - // SystemView_ServiceDesc is the grpc.ServiceDesc for SystemView service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1163,96 +1129,6 @@ var SystemView_ServiceDesc = grpc.ServiceDesc{ MethodName: "GeneratePasswordFromPolicy", Handler: _SystemView_GeneratePasswordFromPolicy_Handler, }, - { - MethodName: "ClusterInfo", - Handler: _SystemView_ClusterInfo_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "sdk/plugin/pb/backend.proto", -} - -// EventsClient is the client API for Events service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type EventsClient interface { - SendEvent(ctx context.Context, in *SendEventRequest, opts ...grpc.CallOption) (*Empty, error) -} - -type eventsClient struct { - cc grpc.ClientConnInterface -} - -func NewEventsClient(cc grpc.ClientConnInterface) EventsClient { - return &eventsClient{cc} -} - -func (c *eventsClient) SendEvent(ctx context.Context, in *SendEventRequest, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/pb.Events/SendEvent", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// EventsServer is the server API for Events service. -// All implementations must embed UnimplementedEventsServer -// for forward compatibility -type EventsServer interface { - SendEvent(context.Context, *SendEventRequest) (*Empty, error) - mustEmbedUnimplementedEventsServer() -} - -// UnimplementedEventsServer must be embedded to have forward compatible implementations. -type UnimplementedEventsServer struct { -} - -func (UnimplementedEventsServer) SendEvent(context.Context, *SendEventRequest) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendEvent not implemented") -} -func (UnimplementedEventsServer) mustEmbedUnimplementedEventsServer() {} - -// UnsafeEventsServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to EventsServer will -// result in compilation errors. 
-type UnsafeEventsServer interface { - mustEmbedUnimplementedEventsServer() -} - -func RegisterEventsServer(s grpc.ServiceRegistrar, srv EventsServer) { - s.RegisterService(&Events_ServiceDesc, srv) -} - -func _Events_SendEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendEventRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(EventsServer).SendEvent(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pb.Events/SendEvent", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(EventsServer).SendEvent(ctx, req.(*SendEventRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// Events_ServiceDesc is the grpc.ServiceDesc for Events service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Events_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "pb.Events", - HandlerType: (*EventsServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SendEvent", - Handler: _Events_SendEvent_Handler, - }, }, Streams: []grpc.StreamDesc{}, Metadata: "sdk/plugin/pb/backend.proto", diff --git a/sdk/plugin/pb/translation.go b/sdk/plugin/pb/translation.go index 92ca9af242855..732086b45e3ca 100644 --- a/sdk/plugin/pb/translation.go +++ b/sdk/plugin/pb/translation.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package pb import ( @@ -623,7 +620,6 @@ func LogicalTokenEntryToProtoTokenEntry(t *logical.TokenEntry) *TokenEntry { NamespaceID: t.NamespaceID, CubbyholeID: t.CubbyholeID, Type: uint32(t.Type), - ExternalID: t.ExternalID, } } @@ -664,7 +660,6 @@ func ProtoTokenEntryToLogicalTokenEntry(t *TokenEntry) (*logical.TokenEntry, err NamespaceID: t.NamespaceID, CubbyholeID: t.CubbyholeID, Type: logical.TokenType(t.Type), - ExternalID: t.ExternalID, }, nil } diff --git a/sdk/plugin/pb/translation_test.go b/sdk/plugin/pb/translation_test.go index 30979257acc14..83cad401e5534 100644 --- a/sdk/plugin/pb/translation_test.go +++ b/sdk/plugin/pb/translation_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package pb import ( diff --git a/sdk/plugin/plugin.go b/sdk/plugin/plugin.go index ec58417ec7d22..edbffcd6983ac 100644 --- a/sdk/plugin/plugin.go +++ b/sdk/plugin/plugin.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( @@ -154,4 +151,14 @@ func (b *BackendPluginClient) PluginVersion() logical.PluginVersion { return logical.EmptyPluginVersion } -var _ logical.PluginVersioner = (*BackendPluginClient)(nil) +func (b *BackendPluginClient) IsExternal() bool { + if externaler, ok := b.Backend.(logical.Externaler); ok { + return externaler.IsExternal() + } + return true // default to true since this is only used for GRPC plugins +} + +var ( + _ logical.PluginVersioner = (*BackendPluginClient)(nil) + _ logical.Externaler = (*BackendPluginClient)(nil) +) diff --git a/sdk/plugin/plugin_v5.go b/sdk/plugin/plugin_v5.go index cc2d1383775d4..2adf020a48eee 100644 --- a/sdk/plugin/plugin_v5.go +++ b/sdk/plugin/plugin_v5.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package plugin import ( @@ -55,7 +52,10 @@ func (b *BackendPluginClientV5) PluginVersion() logical.PluginVersion { return logical.EmptyPluginVersion } -var _ logical.PluginVersioner = (*BackendPluginClientV5)(nil) +var ( + _ logical.PluginVersioner = (*BackendPluginClientV5)(nil) + _ logical.Externaler = (*BackendPluginClientV5)(nil) +) // NewBackendV5 will return an instance of an RPC-based client implementation of // the backend for external plugins, or a concrete implementation of the diff --git a/sdk/plugin/serve.go b/sdk/plugin/serve.go index 9ad2b820bb7ed..0da143f769b8b 100644 --- a/sdk/plugin/serve.go +++ b/sdk/plugin/serve.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/sdk/plugin/storage_test.go b/sdk/plugin/storage_test.go index 61a5deec6720d..651d361990428 100644 --- a/sdk/plugin/storage_test.go +++ b/sdk/plugin/storage_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package plugin import ( diff --git a/sdk/queue/priority_queue.go b/sdk/queue/priority_queue.go index 802a538587b30..3994841773e65 100644 --- a/sdk/queue/priority_queue.go +++ b/sdk/queue/priority_queue.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - // Package queue provides Vault plugins with a Priority Queue. It can be used // as an in-memory list of queue.Item sorted by their priority, and offers // methods to find or remove items by their key. Internally it uses diff --git a/sdk/queue/priority_queue_test.go b/sdk/queue/priority_queue_test.go index 108a26cc0edcf..928442b5246b8 100644 --- a/sdk/queue/priority_queue_test.go +++ b/sdk/queue/priority_queue_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package queue import ( diff --git a/sdk/version/cgo.go b/sdk/version/cgo.go new file mode 100644 index 0000000000000..5bc93e5bfcdaa --- /dev/null +++ b/sdk/version/cgo.go @@ -0,0 +1,7 @@ +//go:build cgo + +package version + +func init() { + CgoEnabled = true +} diff --git a/version/version.go b/sdk/version/version.go similarity index 96% rename from version/version.go rename to sdk/version/version.go index 2dbcfb41c98d2..78b8eb829cdde 100644 --- a/version/version.go +++ b/sdk/version/version.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package version import ( @@ -8,6 +5,7 @@ import ( "fmt" ) +// VersionInfo type VersionInfo struct { Revision string `json:"revision,omitempty"` Version string `json:"version,omitempty"` diff --git a/sdk/version/version_base.go b/sdk/version/version_base.go new file mode 100644 index 0000000000000..4a455d426a475 --- /dev/null +++ b/sdk/version/version_base.go @@ -0,0 +1,17 @@ +package version + +var ( + // The git commit that was compiled. This will be filled in by the compiler. + GitCommit string + GitDescribe string + + // The compilation date. This will be filled in by the compiler. + BuildDate string + + // Whether cgo is enabled or not; set at build time + CgoEnabled bool + + Version = "1.12.7" + VersionPrerelease = "" + VersionMetadata = "" +) diff --git a/serviceregistration/consul/consul_service_registration.go b/serviceregistration/consul/consul_service_registration.go index a49ab4ff63f76..79008967a5694 100644 --- a/serviceregistration/consul/consul_service_registration.go +++ b/serviceregistration/consul/consul_service_registration.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package consul import ( diff --git a/serviceregistration/consul/consul_service_registration_test.go b/serviceregistration/consul/consul_service_registration_test.go index 0ced651e0242f..21b2b2573b533 100644 --- a/serviceregistration/consul/consul_service_registration_test.go +++ b/serviceregistration/consul/consul_service_registration_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package consul import ( diff --git a/serviceregistration/kubernetes/client/client.go b/serviceregistration/kubernetes/client/client.go index 96d1952724530..934d3bad908cd 100644 --- a/serviceregistration/kubernetes/client/client.go +++ b/serviceregistration/kubernetes/client/client.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package client import ( diff --git a/serviceregistration/kubernetes/client/client_test.go b/serviceregistration/kubernetes/client/client_test.go index de11dad37e230..9f0dfad6e9a4b 100644 --- a/serviceregistration/kubernetes/client/client_test.go +++ b/serviceregistration/kubernetes/client/client_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package client import ( diff --git a/serviceregistration/kubernetes/client/cmd/kubeclient/main.go b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go index 7060a063e2d7a..9eb031a362c69 100644 --- a/serviceregistration/kubernetes/client/cmd/kubeclient/main.go +++ b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package main // This code builds a minimal binary of the lightweight kubernetes diff --git a/serviceregistration/kubernetes/client/config.go b/serviceregistration/kubernetes/client/config.go index be98240e21950..4e6a0f45848c6 100644 --- a/serviceregistration/kubernetes/client/config.go +++ b/serviceregistration/kubernetes/client/config.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package client import ( diff --git a/serviceregistration/kubernetes/retry_handler.go b/serviceregistration/kubernetes/retry_handler.go index 46ac18eafdddc..68afa8cdc576d 100644 --- a/serviceregistration/kubernetes/retry_handler.go +++ b/serviceregistration/kubernetes/retry_handler.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package kubernetes import ( diff --git a/serviceregistration/kubernetes/retry_handler_test.go b/serviceregistration/kubernetes/retry_handler_test.go index 0dd61113923e4..e2be809d0a095 100644 --- a/serviceregistration/kubernetes/retry_handler_test.go +++ b/serviceregistration/kubernetes/retry_handler_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package kubernetes import ( diff --git a/serviceregistration/kubernetes/service_registration.go b/serviceregistration/kubernetes/service_registration.go index f377cbb9874eb..f1c9a3c8ce401 100644 --- a/serviceregistration/kubernetes/service_registration.go +++ b/serviceregistration/kubernetes/service_registration.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package kubernetes import ( diff --git a/serviceregistration/kubernetes/service_registration_test.go b/serviceregistration/kubernetes/service_registration_test.go index a6a93c9ceeeed..a1bf001f16424 100644 --- a/serviceregistration/kubernetes/service_registration_test.go +++ b/serviceregistration/kubernetes/service_registration_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package kubernetes import ( diff --git a/serviceregistration/kubernetes/testing/testserver.go b/serviceregistration/kubernetes/testing/testserver.go index 6ceb94018625b..4f406eb6871bc 100644 --- a/serviceregistration/kubernetes/testing/testserver.go +++ b/serviceregistration/kubernetes/testing/testserver.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package testing import ( diff --git a/serviceregistration/service_registration.go b/serviceregistration/service_registration.go index 79f5b20d18839..5dc67af2bee10 100644 --- a/serviceregistration/service_registration.go +++ b/serviceregistration/service_registration.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package serviceregistration /* diff --git a/shamir/shamir.go b/shamir/shamir.go index d9c0271a70985..22e3c337dcb14 100644 --- a/shamir/shamir.go +++ b/shamir/shamir.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package shamir import ( diff --git a/shamir/shamir_test.go b/shamir/shamir_test.go index 940a34ecf167f..90a7c371c2942 100644 --- a/shamir/shamir_test.go +++ b/shamir/shamir_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package shamir import ( diff --git a/tools/godoctests/main.go b/tools/godoctests/main.go deleted file mode 100644 index caa6ca0b93709..0000000000000 --- a/tools/godoctests/main.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package main - -import ( - "github.com/hashicorp/vault/tools/godoctests/pkg/analyzer" - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(analyzer.Analyzer) -} diff --git a/tools/godoctests/pkg/analyzer/analyzer.go b/tools/godoctests/pkg/analyzer/analyzer.go deleted file mode 100644 index 38ed37d933a6d..0000000000000 --- a/tools/godoctests/pkg/analyzer/analyzer.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package analyzer - -import ( - "go/ast" - "strings" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" -) - -var Analyzer = &analysis.Analyzer{ - Name: "godoctests", - Doc: "Verifies that every go test has a go doc", - Run: run, - Requires: []*analysis.Analyzer{inspect.Analyzer}, -} - -func run(pass *analysis.Pass) (interface{}, error) { - inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - - nodeFilter := []ast.Node{ - (*ast.FuncDecl)(nil), - } - - inspector.Preorder(nodeFilter, func(node ast.Node) { - funcDecl, ok := node.(*ast.FuncDecl) - if !ok { - return - } - - // starts with 'Test' - if !strings.HasPrefix(funcDecl.Name.Name, "Test") { - return - } - - // has one parameter - params := funcDecl.Type.Params.List - if len(params) != 1 { - return - } - - // parameter is a pointer - firstParamType, ok := params[0].Type.(*ast.StarExpr) - if !ok { - return - } - - selector, ok := firstParamType.X.(*ast.SelectorExpr) - if !ok { - return - } - - // the pointer comes from package 'testing' - selectorIdent, ok := selector.X.(*ast.Ident) - if !ok { - return - } - if selectorIdent.Name != "testing" { - return - } - - // the pointer has type 'T' - if selector.Sel == nil || selector.Sel.Name != "T" { - return - } - - // then there must be a godoc - if funcDecl.Doc == nil { - pass.Reportf(node.Pos(), "Test %s is missing a go doc", - 
funcDecl.Name.Name) - } else if !strings.HasPrefix(funcDecl.Doc.Text(), funcDecl.Name.Name) { - pass.Reportf(node.Pos(), "Test %s must have a go doc beginning with the function name", - funcDecl.Name.Name) - } - }) - return nil, nil -} diff --git a/tools/godoctests/pkg/analyzer/analyzer_test.go b/tools/godoctests/pkg/analyzer/analyzer_test.go deleted file mode 100644 index df1bfafd46324..0000000000000 --- a/tools/godoctests/pkg/analyzer/analyzer_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package analyzer - -import ( - "os" - "path/filepath" - "testing" - - "golang.org/x/tools/go/analysis/analysistest" -) - -// TestAnalyzer runs the analyzer on the test functions in testdata/funcs.go. The report from the analyzer is compared against -// the comments in funcs.go beginning with "want." If there is no comment beginning with "want", then the analyzer is expected -// not to report anything. -func TestAnalyzer(t *testing.T) { - f, err := os.Getwd() - if err != nil { - t.Fatal("failed to get working directory", err) - } - analysistest.Run(t, filepath.Join(f, "testdata"), Analyzer, ".") -} diff --git a/tools/godoctests/pkg/analyzer/testdata/funcs.go b/tools/godoctests/pkg/analyzer/testdata/funcs.go deleted file mode 100644 index e9d5fead6744c..0000000000000 --- a/tools/godoctests/pkg/analyzer/testdata/funcs.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package testdata - -import "testing" - -// Test_GoDocOK is a test that has a go doc -func Test_GoDocOK(t *testing.T) {} - -func Test_NoGoDocFails(t *testing.T) {} // want "Test Test_NoGoDocFails is missing a go doc" - -// This test does not have a go doc beginning with the function name -func Test_BadGoDocFails(t *testing.T) {} // want "Test Test_BadGoDocFails must have a go doc beginning with the function name" - -func test_TestHelperNoGoDocOK(t *testing.T) {} - -func Test_DifferentSignatureNoGoDocOK() {} - -func Test_DifferentSignature2NoGoDocOK(t *testing.T, a int) {} diff --git a/tools/gonilnilfunctions/main.go b/tools/gonilnilfunctions/main.go deleted file mode 100644 index 68fd796a9f8dd..0000000000000 --- a/tools/gonilnilfunctions/main.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package main - -import ( - "github.com/hashicorp/vault/tools/gonilnilfunctions/pkg/analyzer" - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(analyzer.Analyzer) -} diff --git a/tools/gonilnilfunctions/pkg/analyzer/analyzer.go b/tools/gonilnilfunctions/pkg/analyzer/analyzer.go deleted file mode 100644 index 2b9f17a6e8f39..0000000000000 --- a/tools/gonilnilfunctions/pkg/analyzer/analyzer.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package analyzer - -import ( - "go/ast" - "go/types" - "reflect" - "strings" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" -) - -var Analyzer = &analysis.Analyzer{ - Name: "gonilnilfunctions", - Doc: "Verifies that every go function with error as one of its two return types cannot return nil, nil", - Run: run, - ResultType: reflect.TypeOf((interface{})(nil)), - Requires: []*analysis.Analyzer{inspect.Analyzer}, -} - -// getNestedReturnStatements searches the AST for return statements, and returns -// them in a tail-call optimized list. -func getNestedReturnStatements(s ast.Stmt, returns []*ast.ReturnStmt) []*ast.ReturnStmt { - switch s := s.(type) { - case *ast.BlockStmt: - statements := make([]*ast.ReturnStmt, 0) - for _, stmt := range s.List { - statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) - } - - return append(returns, statements...) - case *ast.BranchStmt: - return returns - case *ast.ForStmt: - return getNestedReturnStatements(s.Body, returns) - case *ast.IfStmt: - return getNestedReturnStatements(s.Body, returns) - case *ast.LabeledStmt: - return getNestedReturnStatements(s.Stmt, returns) - case *ast.RangeStmt: - return getNestedReturnStatements(s.Body, returns) - case *ast.ReturnStmt: - return append(returns, s) - case *ast.SwitchStmt: - return getNestedReturnStatements(s.Body, returns) - case *ast.SelectStmt: - return getNestedReturnStatements(s.Body, returns) - case *ast.TypeSwitchStmt: - return getNestedReturnStatements(s.Body, returns) - case *ast.CommClause: - statements := make([]*ast.ReturnStmt, 0) - for _, stmt := range s.Body { - statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) - } - - return append(returns, statements...) 
- case *ast.CaseClause: - statements := make([]*ast.ReturnStmt, 0) - for _, stmt := range s.Body { - statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) - } - - return append(returns, statements...) - case *ast.ExprStmt: - return returns - } - return returns -} - -// run runs the analysis, failing for functions whose signatures contain two results including one error -// (e.g. (something, error)), that contain multiple nil returns -func run(pass *analysis.Pass) (interface{}, error) { - inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - - nodeFilter := []ast.Node{ - (*ast.FuncDecl)(nil), - } - - inspector.Preorder(nodeFilter, func(node ast.Node) { - funcDecl, ok := node.(*ast.FuncDecl) - if !ok { - return - } - - // If the function has the "Ignore" godoc comment, skip it - if strings.Contains(funcDecl.Doc.Text(), "ignore-nil-nil-function-check") { - return - } - - // The function returns something - if funcDecl == nil || funcDecl.Type == nil || funcDecl.Type.Results == nil { - return - } - - // The function has more than 1 return value - results := funcDecl.Type.Results.List - if len(results) < 2 { - return - } - - // isError is a helper function to check if a Field is of error type - isError := func(field *ast.Field) bool { - if named, ok := pass.TypesInfo.TypeOf(field.Type).(*types.Named); ok { - namedObject := named.Obj() - return namedObject != nil && namedObject.Pkg() == nil && namedObject.Name() == "error" - } - return false - } - - // one of the return values is error - var errorFound bool - for _, result := range results { - if isError(result) { - errorFound = true - break - } - } - - if !errorFound { - return - } - - // Since these statements might be e.g. blocks with - // other statements inside, we need to get the return statements - // from inside them, first. 
- statements := funcDecl.Body.List - - returnStatements := make([]*ast.ReturnStmt, 0) - for _, statement := range statements { - returnStatements = append(returnStatements, getNestedReturnStatements(statement, make([]*ast.ReturnStmt, 0))...) - } - - for _, returnStatement := range returnStatements { - numResultsNil := 0 - results := returnStatement.Results - - // We only want two-arg functions (something, nil) - // We can remove this block in the future if we change our mind - if len(results) != 2 { - continue - } - - for _, result := range results { - // nil is an ident - ident, isIdent := result.(*ast.Ident) - if isIdent { - if ident.Name == "nil" { - // We found one nil in the return list - numResultsNil++ - } - } - } - // We found N nils, and our function returns N results, so this fails the check - if numResultsNil == len(results) { - // All the return values are nil, so we fail the report - pass.Reportf(node.Pos(), "Function %s can return an error, and has a statement that returns only nils", - funcDecl.Name.Name) - - // We break out of the loop of checking return statements, so that we don't repeat ourselves - break - } - } - }) - - var success interface{} - return success, nil -} diff --git a/tools/gonilnilfunctions/pkg/analyzer/analyzer_test.go b/tools/gonilnilfunctions/pkg/analyzer/analyzer_test.go deleted file mode 100644 index df1bfafd46324..0000000000000 --- a/tools/gonilnilfunctions/pkg/analyzer/analyzer_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package analyzer - -import ( - "os" - "path/filepath" - "testing" - - "golang.org/x/tools/go/analysis/analysistest" -) - -// TestAnalyzer runs the analyzer on the test functions in testdata/funcs.go. The report from the analyzer is compared against -// the comments in funcs.go beginning with "want." If there is no comment beginning with "want", then the analyzer is expected -// not to report anything. 
-func TestAnalyzer(t *testing.T) { - f, err := os.Getwd() - if err != nil { - t.Fatal("failed to get working directory", err) - } - analysistest.Run(t, filepath.Join(f, "testdata"), Analyzer, ".") -} diff --git a/tools/gonilnilfunctions/pkg/analyzer/testdata/funcs.go b/tools/gonilnilfunctions/pkg/analyzer/testdata/funcs.go deleted file mode 100644 index f783f01219b12..0000000000000 --- a/tools/gonilnilfunctions/pkg/analyzer/testdata/funcs.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package testdata - -func ReturnReturnOkay() (any, error) { - var i interface{} - return i, nil -} - -func OneGoodOneBad() (any, error) { // want "Function OneGoodOneBad can return an error, and has a statement that returns only nils" - var i interface{} - if true { - return i, nil - } - return nil, nil -} - -func OneBadOneGood() (any, error) { // want "Function OneBadOneGood can return an error, and has a statement that returns only nils" - var i interface{} - if true { - return nil, nil - } - return i, nil -} - -func EmptyFunc() {} - -func TwoNilNils() (any, error) { // want "Function TwoNilNils can return an error, and has a statement that returns only nils" - if true { - return nil, nil - } - return nil, nil -} - -// ThreeResults should not fail, as while it returns nil, nil, nil, it has three results, not two. 
-func ThreeResults() (any, any, error) { - return nil, nil, nil -} - -func TwoArgsNoError() (any, any) { - return nil, nil -} - -func NestedReturn() (any, error) { // want "Function NestedReturn can return an error, and has a statement that returns only nils" - { - { - { - return nil, nil - } - } - } -} - -func NestedForReturn() (any, error) { // want "Function NestedForReturn can return an error, and has a statement that returns only nils" - for { - for i := 0; i < 100; i++ { - { - return nil, nil - } - } - } -} - -func AnyErrorNilNil() (any, error) { // want "Function AnyErrorNilNil can return an error, and has a statement that returns only nils" - return nil, nil -} - -// Skipped should be skipped because of the following line: -// ignore-nil-nil-function-check -func Skipped() (any, error) { - return nil, nil -} diff --git a/tools/semgrep/ci/atomic.yml b/tools/semgrep/ci/atomic.yml index 1d6b2a9da7995..2fea38bd77b4f 100644 --- a/tools/semgrep/ci/atomic.yml +++ b/tools/semgrep/ci/atomic.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: atomics-64bit-safety patterns: diff --git a/tools/semgrep/ci/bad-multierror-append.yml b/tools/semgrep/ci/bad-multierror-append.yml index bebb20013f396..86b637577d9de 100644 --- a/tools/semgrep/ci/bad-multierror-append.yml +++ b/tools/semgrep/ci/bad-multierror-append.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: bad-multierror-append patterns: diff --git a/tools/semgrep/ci/bad-nil-guard.yml b/tools/semgrep/ci/bad-nil-guard.yml index 70003690f72fe..f5fd122e8c965 100644 --- a/tools/semgrep/ci/bad-nil-guard.yml +++ b/tools/semgrep/ci/bad-nil-guard.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - rules: - id: bad-nil-guard patterns: diff --git a/tools/semgrep/ci/error-shadowing.yml b/tools/semgrep/ci/error-shadowing.yml index 43ea1a3eb9fd0..8362df5f7a7ec 100644 --- a/tools/semgrep/ci/error-shadowing.yml +++ b/tools/semgrep/ci/error-shadowing.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: error-shadow-check-types patterns: diff --git a/tools/semgrep/ci/fmt-printf.yml b/tools/semgrep/ci/fmt-printf.yml index fc6e824446a00..18777cabeffc4 100644 --- a/tools/semgrep/ci/fmt-printf.yml +++ b/tools/semgrep/ci/fmt-printf.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: fmt.Printf languages: [go] diff --git a/tools/semgrep/ci/hashsum.yml b/tools/semgrep/ci/hashsum.yml index 82765a12a262a..47dfc02a98c40 100644 --- a/tools/semgrep/ci/hashsum.yml +++ b/tools/semgrep/ci/hashsum.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: hash-sum-without-write patterns: diff --git a/tools/semgrep/ci/hmac-bytes.yml b/tools/semgrep/ci/hmac-bytes.yml index d8da277064a2b..629a8fe6c5b53 100644 --- a/tools/semgrep/ci/hmac-bytes.yml +++ b/tools/semgrep/ci/hmac-bytes.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: use-hmac-equal patterns: diff --git a/tools/semgrep/ci/hmac-hash.yml b/tools/semgrep/ci/hmac-hash.yml index 2b03883c4a512..625d271c18b41 100644 --- a/tools/semgrep/ci/hmac-hash.yml +++ b/tools/semgrep/ci/hmac-hash.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: hmac-needs-new patterns: diff --git a/tools/semgrep/ci/logger-format-string.yml b/tools/semgrep/ci/logger-format-string.yml index 14cb6cd4276ca..bb1b83e209bc3 100644 --- a/tools/semgrep/ci/logger-format-string.yml +++ b/tools/semgrep/ci/logger-format-string.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - rules: - id: logger-used-with-format-string patterns: diff --git a/tools/semgrep/ci/loop-time-after.yml b/tools/semgrep/ci/loop-time-after.yml deleted file mode 100644 index e3a5183a1fd69..0000000000000 --- a/tools/semgrep/ci/loop-time-after.yml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -rules: - - id: loop-time-after - pattern: | - for ... { - ... - select { - case ... - case <-time.After(...): - ... - case ... - } - ... - } - message: <-time.After() used in for loop, consider using a ticker or a timer instead - languages: - - go - severity: WARNING \ No newline at end of file diff --git a/tools/semgrep/ci/loopclosure.yml b/tools/semgrep/ci/loopclosure.yml index 88ab134c5dff3..967376127db86 100644 --- a/tools/semgrep/ci/loopclosure.yml +++ b/tools/semgrep/ci/loopclosure.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: loopclosure patterns: diff --git a/tools/semgrep/ci/no-nil-check.yml b/tools/semgrep/ci/no-nil-check.yml index 0b1f1ce372051..8697c08ce4ac9 100644 --- a/tools/semgrep/ci/no-nil-check.yml +++ b/tools/semgrep/ci/no-nil-check.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: nil-check-logical-storage patterns: @@ -65,7 +62,7 @@ rules: severity: ERROR # NamespaceByID - - id: nil-check-physical-storage-by-nsid + - id: nil-check-physical-storage patterns: - pattern-either: - pattern: | diff --git a/tools/semgrep/ci/oddifsequence.yml b/tools/semgrep/ci/oddifsequence.yml index 77b71b6a2a635..bee36d06fad23 100644 --- a/tools/semgrep/ci/oddifsequence.yml +++ b/tools/semgrep/ci/oddifsequence.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - rules: - id: odd-sequence-ifs patterns: diff --git a/tools/semgrep/ci/return-nil-error.yml b/tools/semgrep/ci/return-nil-error.yml index a91e4eaecd6d9..0b6f7f677c653 100644 --- a/tools/semgrep/ci/return-nil-error.yml +++ b/tools/semgrep/ci/return-nil-error.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: return-nil patterns: diff --git a/tools/semgrep/ci/return-nil.yml b/tools/semgrep/ci/return-nil.yml index 2a6447cef7103..aa3975647e7a2 100644 --- a/tools/semgrep/ci/return-nil.yml +++ b/tools/semgrep/ci/return-nil.yml @@ -1,8 +1,5 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - - id: hc-return-nil + - id: return-nil patterns: - pattern-either: - pattern: | diff --git a/tools/semgrep/ci/wrongerrcall.yml b/tools/semgrep/ci/wrongerrcall.yml index 315e26d5f2d08..5a1a4d37af8d8 100644 --- a/tools/semgrep/ci/wrongerrcall.yml +++ b/tools/semgrep/ci/wrongerrcall.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: maybe-wrong-err patterns: diff --git a/tools/semgrep/ci/wronglock.yml b/tools/semgrep/ci/wronglock.yml index 126a5446a4fd5..5f8422ce46cad 100644 --- a/tools/semgrep/ci/wronglock.yml +++ b/tools/semgrep/ci/wronglock.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: wrong-lock-unlock patterns: diff --git a/tools/semgrep/hostport.yml b/tools/semgrep/hostport.yml index 28613ecd90364..c47510e8814b2 100644 --- a/tools/semgrep/hostport.yml +++ b/tools/semgrep/hostport.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - # https://github.com/golang/go/issues/28308, from @stapelberg rules: - id: sprintf-host-port diff --git a/tools/semgrep/joinpath.yml b/tools/semgrep/joinpath.yml index ec27127bfa030..173aba0ab0a0e 100644 --- a/tools/semgrep/joinpath.yml +++ b/tools/semgrep/joinpath.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: use-strings-join-path patterns: diff --git a/tools/semgrep/lock-not-unlocked-on-return.yml b/tools/semgrep/lock-not-unlocked-on-return.yml index 6482b71945992..2d097f1aa0ff7 100644 --- a/tools/semgrep/lock-not-unlocked-on-return.yml +++ b/tools/semgrep/lock-not-unlocked-on-return.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: lock_not_unlocked message: | diff --git a/tools/semgrep/logger-sprintf.yml b/tools/semgrep/logger-sprintf.yml index 7d2f48bcdc3ec..3f58ba18ba8f7 100644 --- a/tools/semgrep/logger-sprintf.yml +++ b/tools/semgrep/logger-sprintf.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: logger-used-with-sprintf patterns: diff --git a/tools/semgrep/paths-with-callbacks-and-operations.yml b/tools/semgrep/paths-with-callbacks-and-operations.yml index e29cbab65bafd..08e9c1ec1e396 100644 --- a/tools/semgrep/paths-with-callbacks-and-operations.yml +++ b/tools/semgrep/paths-with-callbacks-and-operations.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: path-has-both-callbacks-and-operations patterns: diff --git a/tools/semgrep/paths-with-callbacks.yml b/tools/semgrep/paths-with-callbacks.yml index 9049a1d370a33..3a122cc6d9e4a 100644 --- a/tools/semgrep/paths-with-callbacks.yml +++ b/tools/semgrep/paths-with-callbacks.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - rules: - id: uses-path-callbacks patterns: diff --git a/tools/semgrep/physical-storage.yml b/tools/semgrep/physical-storage.yml index e7e978cc75a9d..970c77693ed36 100644 --- a/tools/semgrep/physical-storage.yml +++ b/tools/semgrep/physical-storage.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: physical-storage-bypass-encryption patterns: diff --git a/tools/semgrep/replication-has-state.yml b/tools/semgrep/replication-has-state.yml index 7868e328087e5..416a59e6af6a2 100644 --- a/tools/semgrep/replication-has-state.yml +++ b/tools/semgrep/replication-has-state.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: replication-state-should-use-IsPerfSecondary patterns: diff --git a/tools/semgrep/self-equals.yml b/tools/semgrep/self-equals.yml index ae7c1ff8c93c7..7cc5243f2b075 100644 --- a/tools/semgrep/self-equals.yml +++ b/tools/semgrep/self-equals.yml @@ -1,6 +1,3 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - rules: - id: self-equals patterns: diff --git a/tools/tools.go b/tools/tools.go index a3f743244ffad..061c963382f79 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - //go:build tools // This file ensures tool dependencies are kept in sync. 
This is the @@ -19,8 +16,6 @@ package tools //go:generate go install google.golang.org/protobuf/cmd/protoc-gen-go //go:generate go install google.golang.org/grpc/cmd/protoc-gen-go-grpc //go:generate go install github.com/favadi/protoc-go-inject-tag -//go:generate go install honnef.co/go/tools/cmd/staticcheck -//go:generate go install github.com/golangci/revgrep/cmd/revgrep //go:generate go install gotest.tools/gotestsum import ( _ "golang.org/x/tools/cmd/goimports" @@ -35,9 +30,5 @@ import ( _ "github.com/favadi/protoc-go-inject-tag" - _ "github.com/golangci/revgrep/cmd/revgrep" - _ "gotest.tools/gotestsum" - - _ "honnef.co/go/tools/cmd/staticcheck" ) diff --git a/ui/.ember-cli b/ui/.ember-cli index f6e59871ff52d..4202ec1b2d71a 100644 --- a/ui/.ember-cli +++ b/ui/.ember-cli @@ -6,11 +6,5 @@ Setting `disableAnalytics` to true will prevent any data from being sent. */ "disableAnalytics": true, - "output-path": "../http/web_ui", - - /** - Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript - rather than JavaScript by default, when a TypeScript version of a given blueprint is available. - */ - "isTypeScriptProject": false + "output-path": "../http/web_ui" } diff --git a/ui/.eslintignore b/ui/.eslintignore index c352da9c77be8..5bd267e8e9d7c 100644 --- a/ui/.eslintignore +++ b/ui/.eslintignore @@ -20,11 +20,6 @@ # ember-try /.node_modules.ember-try/ /bower.json.ember-try -/npm-shrinkwrap.json.ember-try /package.json.ember-try -/package-lock.json.ember-try -/yarn.lock.ember-try -/tests/helpers/vault-keys.js -# typescript declaration files -*.d.ts +/tests/helpers/vault-keys.js diff --git a/ui/.eslintrc.js b/ui/.eslintrc.js index bb4548319848e..9e626cbc17bba 100644 --- a/ui/.eslintrc.js +++ b/ui/.eslintrc.js @@ -1,8 +1,3 @@ -/** - * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: MPL-2.0 - */ - /* eslint-disable no-undef */ 'use strict'; @@ -28,8 +23,7 @@ module.exports = { browser: true, }, rules: { - 'no-console': 'error', - 'prefer-const': ['error', { destructuring: 'all' }], + 'no-console': 'warn', 'ember/no-mixins': 'warn', 'ember/no-new-mixins': 'off', // should be warn but then every line of the mixin is green // need to be fully glimmerized before these rules can be turned on @@ -69,13 +63,9 @@ module.exports = { }, }, { - // test files + // Test files: files: ['tests/**/*-test.{js,ts}'], extends: ['plugin:qunit/recommended'], }, - { - files: ['**/*.ts'], - extends: ['plugin:@typescript-eslint/recommended'], - }, ], }; diff --git a/ui/.gitignore b/ui/.gitignore index f51263f3dd9b3..0474208ee07e0 100644 --- a/ui/.gitignore +++ b/ui/.gitignore @@ -22,19 +22,4 @@ package-lock.json # ember-try /.node_modules.ember-try/ /bower.json.ember-try -/npm-shrinkwrap.json.ember-try /package.json.ember-try -/package-lock.json.ember-try -/yarn.lock.ember-try - -# broccoli-debug -/DEBUG/ - -# yarn -.pnp.* -.yarn/* -!.yarn/patches -!.yarn/plugins -!.yarn/releases -!.yarn/sdks -!.yarn/versions diff --git a/ui/.nvmrc b/ui/.nvmrc new file mode 100644 index 0000000000000..958b5a36e1fad --- /dev/null +++ b/ui/.nvmrc @@ -0,0 +1 @@ +v14 diff --git a/ui/.prettierignore b/ui/.prettierignore index 4178fd571e680..9221655522bb5 100644 --- a/ui/.prettierignore +++ b/ui/.prettierignore @@ -14,12 +14,8 @@ /coverage/ !.* .eslintcache -.lint-todo/ # ember-try /.node_modules.ember-try/ /bower.json.ember-try -/npm-shrinkwrap.json.ember-try /package.json.ember-try -/package-lock.json.ember-try -/yarn.lock.ember-try diff --git a/ui/.prettierrc.js b/ui/.prettierrc.js index 8c776351a4545..8f507fd9b33d0 100644 --- a/ui/.prettierrc.js +++ b/ui/.prettierrc.js @@ -1,8 +1,3 @@ -/** - * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: MPL-2.0 - */ - 'use strict'; module.exports = { diff --git a/ui/.template-lintrc.js b/ui/.template-lintrc.js index 3540521890a8c..f61b165724d1b 100644 --- a/ui/.template-lintrc.js +++ b/ui/.template-lintrc.js @@ -1,8 +1,3 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: MPL-2.0 - */ - 'use strict'; const fs = require('fs'); @@ -34,18 +29,22 @@ try { prettier: false, }; } catch (error) { - console.log(error); // eslint-disable-line + console.log(error); } module.exports = { plugins: ['ember-template-lint-plugin-prettier'], extends: ['recommended', 'ember-template-lint-plugin-prettier:recommended'], rules: { + 'no-bare-strings': 'off', 'no-action': 'off', + 'no-duplicate-landmark-elements': 'warn', 'no-implicit-this': { allow: ['supported-auth-backends'], }, 'require-input-label': 'off', + 'no-down-event-binding': 'warn', + 'self-closing-void-elements': 'off', }, ignore: ['lib/story-md', 'tests/**'], // ember language server vscode extension does not currently respect the ignore field diff --git a/ui/.yarn/releases/yarn-1.19.1.js b/ui/.yarn/releases/yarn-1.19.1.js new file mode 100755 index 0000000000000..3907b87325d00 --- /dev/null +++ b/ui/.yarn/releases/yarn-1.19.1.js @@ -0,0 +1,147216 @@ +#!/usr/bin/env node +module.exports = +/******/ (function(modules) { // webpackBootstrap +/******/ // The module cache +/******/ var installedModules = {}; +/******/ +/******/ // The require function +/******/ function __webpack_require__(moduleId) { +/******/ +/******/ // Check if module is in cache +/******/ if(installedModules[moduleId]) { +/******/ return installedModules[moduleId].exports; +/******/ } +/******/ // Create a new module (and put it into the cache) +/******/ var module = installedModules[moduleId] = { +/******/ i: moduleId, +/******/ l: false, +/******/ exports: {} +/******/ }; +/******/ +/******/ // Execute the module function +/******/ modules[moduleId].call(module.exports, module, module.exports, 
__webpack_require__); +/******/ +/******/ // Flag the module as loaded +/******/ module.l = true; +/******/ +/******/ // Return the exports of the module +/******/ return module.exports; +/******/ } +/******/ +/******/ +/******/ // expose the modules object (__webpack_modules__) +/******/ __webpack_require__.m = modules; +/******/ +/******/ // expose the module cache +/******/ __webpack_require__.c = installedModules; +/******/ +/******/ // identity function for calling harmony imports with the correct context +/******/ __webpack_require__.i = function(value) { return value; }; +/******/ +/******/ // define getter function for harmony exports +/******/ __webpack_require__.d = function(exports, name, getter) { +/******/ if(!__webpack_require__.o(exports, name)) { +/******/ Object.defineProperty(exports, name, { +/******/ configurable: false, +/******/ enumerable: true, +/******/ get: getter +/******/ }); +/******/ } +/******/ }; +/******/ +/******/ // getDefaultExport function for compatibility with non-harmony modules +/******/ __webpack_require__.n = function(module) { +/******/ var getter = module && module.__esModule ? 
+/******/ function getDefault() { return module['default']; } : +/******/ function getModuleExports() { return module; }; +/******/ __webpack_require__.d(getter, 'a', getter); +/******/ return getter; +/******/ }; +/******/ +/******/ // Object.prototype.hasOwnProperty.call +/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; +/******/ +/******/ // __webpack_public_path__ +/******/ __webpack_require__.p = ""; +/******/ +/******/ // Load entry module and return exports +/******/ return __webpack_require__(__webpack_require__.s = 549); +/******/ }) +/************************************************************************/ +/******/ ([ +/* 0 */ +/***/ (function(module, exports) { + +module.exports = require("path"); + +/***/ }), +/* 1 */ +/***/ (function(module, __webpack_exports__, __webpack_require__) { + +"use strict"; +/* harmony export (immutable) */ __webpack_exports__["a"] = __extends; +/* unused harmony export __assign */ +/* unused harmony export __rest */ +/* unused harmony export __decorate */ +/* unused harmony export __param */ +/* unused harmony export __metadata */ +/* unused harmony export __awaiter */ +/* unused harmony export __generator */ +/* unused harmony export __exportStar */ +/* unused harmony export __values */ +/* unused harmony export __read */ +/* unused harmony export __spread */ +/* unused harmony export __await */ +/* unused harmony export __asyncGenerator */ +/* unused harmony export __asyncDelegator */ +/* unused harmony export __asyncValues */ +/* unused harmony export __makeTemplateObject */ +/* unused harmony export __importStar */ +/* unused harmony export __importDefault */ +/*! ***************************************************************************** +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. 
You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. +***************************************************************************** */ +/* global Reflect, Promise */ + +var extendStatics = function(d, b) { + extendStatics = Object.setPrototypeOf || + ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || + function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; }; + return extendStatics(d, b); +}; + +function __extends(d, b) { + extendStatics(d, b); + function __() { this.constructor = d; } + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); +} + +var __assign = function() { + __assign = Object.assign || function __assign(t) { + for (var s, i = 1, n = arguments.length; i < n; i++) { + s = arguments[i]; + for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p]; + } + return t; + } + return __assign.apply(this, arguments); +} + +function __rest(s, e) { + var t = {}; + for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0) + t[p] = s[p]; + if (s != null && typeof Object.getOwnPropertySymbols === "function") + for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) if (e.indexOf(p[i]) < 0) + t[p[i]] = s[p[i]]; + return t; +} + +function __decorate(decorators, target, key, desc) { + var c = arguments.length, r = c < 3 ? target : desc === null ? 
desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; + if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); + else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; + return c > 3 && r && Object.defineProperty(target, key, r), r; +} + +function __param(paramIndex, decorator) { + return function (target, key) { decorator(target, key, paramIndex); } +} + +function __metadata(metadataKey, metadataValue) { + if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(metadataKey, metadataValue); +} + +function __awaiter(thisArg, _arguments, P, generator) { + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +} + +function __generator(thisArg, body) { + var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; + return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; + function verb(n) { return function (v) { return step([n, v]); }; } + function step(op) { + if (f) throw new TypeError("Generator is already executing."); + while (_) try { + if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? 
y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; + if (y = 0, t) op = [op[0] & 2, t.value]; + switch (op[0]) { + case 0: case 1: t = op; break; + case 4: _.label++; return { value: op[1], done: false }; + case 5: _.label++; y = op[1]; op = [0]; continue; + case 7: op = _.ops.pop(); _.trys.pop(); continue; + default: + if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } + if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } + if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } + if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } + if (t[2]) _.ops.pop(); + _.trys.pop(); continue; + } + op = body.call(thisArg, _); + } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } + if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true }; + } +} + +function __exportStar(m, exports) { + for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p]; +} + +function __values(o) { + var m = typeof Symbol === "function" && o[Symbol.iterator], i = 0; + if (m) return m.call(o); + return { + next: function () { + if (o && i >= o.length) o = void 0; + return { value: o && o[i++], done: !o }; + } + }; +} + +function __read(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; + if (!m) return o; + var i = m.call(o), r, ar = [], e; + try { + while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); + } + catch (error) { e = { error: error }; } + finally { + try { + if (r && !r.done && (m = i["return"])) m.call(i); + } + finally { if (e) throw e.error; } + } + return ar; +} + +function __spread() { + for (var ar = [], i = 0; i < arguments.length; i++) + ar = ar.concat(__read(arguments[i])); + return ar; +} + +function __await(v) { + return this instanceof __await ? 
(this.v = v, this) : new __await(v); +} + +function __asyncGenerator(thisArg, _arguments, generator) { + if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined."); + var g = generator.apply(thisArg, _arguments || []), i, q = []; + return i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i; + function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; } + function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } } + function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); } + function fulfill(value) { resume("next", value); } + function reject(value) { resume("throw", value); } + function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); } +} + +function __asyncDelegator(o) { + var i, p; + return i = {}, verb("next"), verb("throw", function (e) { throw e; }), verb("return"), i[Symbol.iterator] = function () { return this; }, i; + function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === "return" } : f ? f(v) : v; } : f; } +} + +function __asyncValues(o) { + if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined."); + var m = o[Symbol.asyncIterator], i; + return m ? m.call(o) : (o = typeof __values === "function" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i); + function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; } + function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); } +} + +function __makeTemplateObject(cooked, raw) { + if (Object.defineProperty) { Object.defineProperty(cooked, "raw", { value: raw }); } else { cooked.raw = raw; } + return cooked; +}; + +function __importStar(mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; + result.default = mod; + return result; +} + +function __importDefault(mod) { + return (mod && mod.__esModule) ? mod : { default: mod }; +} + + +/***/ }), +/* 2 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +exports.__esModule = true; + +var _promise = __webpack_require__(227); + +var _promise2 = _interopRequireDefault(_promise); + +function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } + +exports.default = function (fn) { + return function () { + var gen = fn.apply(this, arguments); + return new _promise2.default(function (resolve, reject) { + function step(key, arg) { + try { + var info = gen[key](arg); + var value = info.value; + } catch (error) { + reject(error); + return; + } + + if (info.done) { + resolve(value); + } else { + return _promise2.default.resolve(value).then(function (value) { + step("next", value); + }, function (err) { + step("throw", err); + }); + } + } + + return step("next"); + }); + }; +}; + +/***/ }), +/* 3 */ +/***/ (function(module, exports) { + +module.exports = require("util"); + +/***/ }), +/* 4 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.getFirstSuitableFolder = exports.readFirstAvailableStream = exports.makeTempDir = exports.hardlinksWork = exports.writeFilePreservingEol = exports.getFileSizeOnDisk = exports.walk = exports.symlink = exports.find = exports.readJsonAndFile = exports.readJson = exports.readFileAny = exports.hardlinkBulk = exports.copyBulk = exports.unlink = exports.glob = exports.link = exports.chmod = exports.lstat = exports.exists = exports.mkdirp = exports.stat = exports.access = exports.rename = exports.readdir = exports.realpath = exports.readlink = exports.writeFile = exports.open = exports.readFileBuffer = exports.lockQueue = exports.constants = undefined; + +var _asyncToGenerator2; + +function _load_asyncToGenerator() { + return _asyncToGenerator2 = _interopRequireDefault(__webpack_require__(2)); +} + +let buildActionsForCopy = (() => { + var _ref = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, events, possibleExtraneous, reporter) { + + // + let build = (() => { + var _ref5 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { + const src = data.src, + dest = data.dest, + type = data.type; + + 
const onFresh = data.onFresh || noop; + const onDone = data.onDone || noop; + + // TODO https://github.com/yarnpkg/yarn/issues/3751 + // related to bundled dependencies handling + if (files.has(dest.toLowerCase())) { + reporter.verbose(`The case-insensitive file ${dest} shouldn't be copied twice in one bulk copy`); + } else { + files.add(dest.toLowerCase()); + } + + if (type === 'symlink') { + yield mkdirp((_path || _load_path()).default.dirname(dest)); + onFresh(); + actions.symlink.push({ + dest, + linkname: src + }); + onDone(); + return; + } + + if (events.ignoreBasenames.indexOf((_path || _load_path()).default.basename(src)) >= 0) { + // ignored file + return; + } + + const srcStat = yield lstat(src); + let srcFiles; + + if (srcStat.isDirectory()) { + srcFiles = yield readdir(src); + } + + let destStat; + try { + // try accessing the destination + destStat = yield lstat(dest); + } catch (e) { + // proceed if destination doesn't exist, otherwise error + if (e.code !== 'ENOENT') { + throw e; + } + } + + // if destination exists + if (destStat) { + const bothSymlinks = srcStat.isSymbolicLink() && destStat.isSymbolicLink(); + const bothFolders = srcStat.isDirectory() && destStat.isDirectory(); + const bothFiles = srcStat.isFile() && destStat.isFile(); + + // EINVAL access errors sometimes happen which shouldn't because node shouldn't be giving + // us modes that aren't valid. investigate this, it's generally safe to proceed. + + /* if (srcStat.mode !== destStat.mode) { + try { + await access(dest, srcStat.mode); + } catch (err) {} + } */ + + if (bothFiles && artifactFiles.has(dest)) { + // this file gets changed during build, likely by a custom install script. Don't bother checking it. 
+ onDone(); + reporter.verbose(reporter.lang('verboseFileSkipArtifact', src)); + return; + } + + if (bothFiles && srcStat.size === destStat.size && (0, (_fsNormalized || _load_fsNormalized()).fileDatesEqual)(srcStat.mtime, destStat.mtime)) { + // we can safely assume this is the same file + onDone(); + reporter.verbose(reporter.lang('verboseFileSkip', src, dest, srcStat.size, +srcStat.mtime)); + return; + } + + if (bothSymlinks) { + const srcReallink = yield readlink(src); + if (srcReallink === (yield readlink(dest))) { + // if both symlinks are the same then we can continue on + onDone(); + reporter.verbose(reporter.lang('verboseFileSkipSymlink', src, dest, srcReallink)); + return; + } + } + + if (bothFolders) { + // mark files that aren't in this folder as possibly extraneous + const destFiles = yield readdir(dest); + invariant(srcFiles, 'src files not initialised'); + + for (var _iterator4 = destFiles, _isArray4 = Array.isArray(_iterator4), _i4 = 0, _iterator4 = _isArray4 ? _iterator4 : _iterator4[Symbol.iterator]();;) { + var _ref6; + + if (_isArray4) { + if (_i4 >= _iterator4.length) break; + _ref6 = _iterator4[_i4++]; + } else { + _i4 = _iterator4.next(); + if (_i4.done) break; + _ref6 = _i4.value; + } + + const file = _ref6; + + if (srcFiles.indexOf(file) < 0) { + const loc = (_path || _load_path()).default.join(dest, file); + possibleExtraneous.add(loc); + + if ((yield lstat(loc)).isDirectory()) { + for (var _iterator5 = yield readdir(loc), _isArray5 = Array.isArray(_iterator5), _i5 = 0, _iterator5 = _isArray5 ? 
_iterator5 : _iterator5[Symbol.iterator]();;) { + var _ref7; + + if (_isArray5) { + if (_i5 >= _iterator5.length) break; + _ref7 = _iterator5[_i5++]; + } else { + _i5 = _iterator5.next(); + if (_i5.done) break; + _ref7 = _i5.value; + } + + const file = _ref7; + + possibleExtraneous.add((_path || _load_path()).default.join(loc, file)); + } + } + } + } + } + } + + if (destStat && destStat.isSymbolicLink()) { + yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(dest); + destStat = null; + } + + if (srcStat.isSymbolicLink()) { + onFresh(); + const linkname = yield readlink(src); + actions.symlink.push({ + dest, + linkname + }); + onDone(); + } else if (srcStat.isDirectory()) { + if (!destStat) { + reporter.verbose(reporter.lang('verboseFileFolder', dest)); + yield mkdirp(dest); + } + + const destParts = dest.split((_path || _load_path()).default.sep); + while (destParts.length) { + files.add(destParts.join((_path || _load_path()).default.sep).toLowerCase()); + destParts.pop(); + } + + // push all files to queue + invariant(srcFiles, 'src files not initialised'); + let remaining = srcFiles.length; + if (!remaining) { + onDone(); + } + for (var _iterator6 = srcFiles, _isArray6 = Array.isArray(_iterator6), _i6 = 0, _iterator6 = _isArray6 ? 
_iterator6 : _iterator6[Symbol.iterator]();;) { + var _ref8; + + if (_isArray6) { + if (_i6 >= _iterator6.length) break; + _ref8 = _iterator6[_i6++]; + } else { + _i6 = _iterator6.next(); + if (_i6.done) break; + _ref8 = _i6.value; + } + + const file = _ref8; + + queue.push({ + dest: (_path || _load_path()).default.join(dest, file), + onFresh, + onDone: function (_onDone) { + function onDone() { + return _onDone.apply(this, arguments); + } + + onDone.toString = function () { + return _onDone.toString(); + }; + + return onDone; + }(function () { + if (--remaining === 0) { + onDone(); + } + }), + src: (_path || _load_path()).default.join(src, file) + }); + } + } else if (srcStat.isFile()) { + onFresh(); + actions.file.push({ + src, + dest, + atime: srcStat.atime, + mtime: srcStat.mtime, + mode: srcStat.mode + }); + onDone(); + } else { + throw new Error(`unsure how to copy this: ${src}`); + } + }); + + return function build(_x5) { + return _ref5.apply(this, arguments); + }; + })(); + + const artifactFiles = new Set(events.artifactFiles || []); + const files = new Set(); + + // initialise events + for (var _iterator = queue, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? 
_iterator : _iterator[Symbol.iterator]();;) { + var _ref2; + + if (_isArray) { + if (_i >= _iterator.length) break; + _ref2 = _iterator[_i++]; + } else { + _i = _iterator.next(); + if (_i.done) break; + _ref2 = _i.value; + } + + const item = _ref2; + + const onDone = item.onDone; + item.onDone = function () { + events.onProgress(item.dest); + if (onDone) { + onDone(); + } + }; + } + events.onStart(queue.length); + + // start building actions + const actions = { + file: [], + symlink: [], + link: [] + }; + + // custom concurrency logic as we're always executing stacks of CONCURRENT_QUEUE_ITEMS queue items + // at a time due to the requirement to push items onto the queue + while (queue.length) { + const items = queue.splice(0, CONCURRENT_QUEUE_ITEMS); + yield Promise.all(items.map(build)); + } + + // simulate the existence of some files to prevent considering them extraneous + for (var _iterator2 = artifactFiles, _isArray2 = Array.isArray(_iterator2), _i2 = 0, _iterator2 = _isArray2 ? _iterator2 : _iterator2[Symbol.iterator]();;) { + var _ref3; + + if (_isArray2) { + if (_i2 >= _iterator2.length) break; + _ref3 = _iterator2[_i2++]; + } else { + _i2 = _iterator2.next(); + if (_i2.done) break; + _ref3 = _i2.value; + } + + const file = _ref3; + + if (possibleExtraneous.has(file)) { + reporter.verbose(reporter.lang('verboseFilePhantomExtraneous', file)); + possibleExtraneous.delete(file); + } + } + + for (var _iterator3 = possibleExtraneous, _isArray3 = Array.isArray(_iterator3), _i3 = 0, _iterator3 = _isArray3 ? 
_iterator3 : _iterator3[Symbol.iterator]();;) { + var _ref4; + + if (_isArray3) { + if (_i3 >= _iterator3.length) break; + _ref4 = _iterator3[_i3++]; + } else { + _i3 = _iterator3.next(); + if (_i3.done) break; + _ref4 = _i3.value; + } + + const loc = _ref4; + + if (files.has(loc.toLowerCase())) { + possibleExtraneous.delete(loc); + } + } + + return actions; + }); + + return function buildActionsForCopy(_x, _x2, _x3, _x4) { + return _ref.apply(this, arguments); + }; +})(); + +let buildActionsForHardlink = (() => { + var _ref9 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, events, possibleExtraneous, reporter) { + + // + let build = (() => { + var _ref13 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { + const src = data.src, + dest = data.dest; + + const onFresh = data.onFresh || noop; + const onDone = data.onDone || noop; + if (files.has(dest.toLowerCase())) { + // Fixes issue https://github.com/yarnpkg/yarn/issues/2734 + // When bulk hardlinking we have A -> B structure that we want to hardlink to A1 -> B1, + // package-linker passes that modules A1 and B1 need to be hardlinked, + // the recursive linking algorithm of A1 ends up scheduling files in B1 to be linked twice which will case + // an exception. 
+ onDone(); + return; + } + files.add(dest.toLowerCase()); + + if (events.ignoreBasenames.indexOf((_path || _load_path()).default.basename(src)) >= 0) { + // ignored file + return; + } + + const srcStat = yield lstat(src); + let srcFiles; + + if (srcStat.isDirectory()) { + srcFiles = yield readdir(src); + } + + const destExists = yield exists(dest); + if (destExists) { + const destStat = yield lstat(dest); + + const bothSymlinks = srcStat.isSymbolicLink() && destStat.isSymbolicLink(); + const bothFolders = srcStat.isDirectory() && destStat.isDirectory(); + const bothFiles = srcStat.isFile() && destStat.isFile(); + + if (srcStat.mode !== destStat.mode) { + try { + yield access(dest, srcStat.mode); + } catch (err) { + // EINVAL access errors sometimes happen which shouldn't because node shouldn't be giving + // us modes that aren't valid. investigate this, it's generally safe to proceed. + reporter.verbose(err); + } + } + + if (bothFiles && artifactFiles.has(dest)) { + // this file gets changed during build, likely by a custom install script. Don't bother checking it. + onDone(); + reporter.verbose(reporter.lang('verboseFileSkipArtifact', src)); + return; + } + + // correct hardlink + if (bothFiles && srcStat.ino !== null && srcStat.ino === destStat.ino) { + onDone(); + reporter.verbose(reporter.lang('verboseFileSkip', src, dest, srcStat.ino)); + return; + } + + if (bothSymlinks) { + const srcReallink = yield readlink(src); + if (srcReallink === (yield readlink(dest))) { + // if both symlinks are the same then we can continue on + onDone(); + reporter.verbose(reporter.lang('verboseFileSkipSymlink', src, dest, srcReallink)); + return; + } + } + + if (bothFolders) { + // mark files that aren't in this folder as possibly extraneous + const destFiles = yield readdir(dest); + invariant(srcFiles, 'src files not initialised'); + + for (var _iterator10 = destFiles, _isArray10 = Array.isArray(_iterator10), _i10 = 0, _iterator10 = _isArray10 ? 
_iterator10 : _iterator10[Symbol.iterator]();;) { + var _ref14; + + if (_isArray10) { + if (_i10 >= _iterator10.length) break; + _ref14 = _iterator10[_i10++]; + } else { + _i10 = _iterator10.next(); + if (_i10.done) break; + _ref14 = _i10.value; + } + + const file = _ref14; + + if (srcFiles.indexOf(file) < 0) { + const loc = (_path || _load_path()).default.join(dest, file); + possibleExtraneous.add(loc); + + if ((yield lstat(loc)).isDirectory()) { + for (var _iterator11 = yield readdir(loc), _isArray11 = Array.isArray(_iterator11), _i11 = 0, _iterator11 = _isArray11 ? _iterator11 : _iterator11[Symbol.iterator]();;) { + var _ref15; + + if (_isArray11) { + if (_i11 >= _iterator11.length) break; + _ref15 = _iterator11[_i11++]; + } else { + _i11 = _iterator11.next(); + if (_i11.done) break; + _ref15 = _i11.value; + } + + const file = _ref15; + + possibleExtraneous.add((_path || _load_path()).default.join(loc, file)); + } + } + } + } + } + } + + if (srcStat.isSymbolicLink()) { + onFresh(); + const linkname = yield readlink(src); + actions.symlink.push({ + dest, + linkname + }); + onDone(); + } else if (srcStat.isDirectory()) { + reporter.verbose(reporter.lang('verboseFileFolder', dest)); + yield mkdirp(dest); + + const destParts = dest.split((_path || _load_path()).default.sep); + while (destParts.length) { + files.add(destParts.join((_path || _load_path()).default.sep).toLowerCase()); + destParts.pop(); + } + + // push all files to queue + invariant(srcFiles, 'src files not initialised'); + let remaining = srcFiles.length; + if (!remaining) { + onDone(); + } + for (var _iterator12 = srcFiles, _isArray12 = Array.isArray(_iterator12), _i12 = 0, _iterator12 = _isArray12 ? 
_iterator12 : _iterator12[Symbol.iterator]();;) { + var _ref16; + + if (_isArray12) { + if (_i12 >= _iterator12.length) break; + _ref16 = _iterator12[_i12++]; + } else { + _i12 = _iterator12.next(); + if (_i12.done) break; + _ref16 = _i12.value; + } + + const file = _ref16; + + queue.push({ + onFresh, + src: (_path || _load_path()).default.join(src, file), + dest: (_path || _load_path()).default.join(dest, file), + onDone: function (_onDone2) { + function onDone() { + return _onDone2.apply(this, arguments); + } + + onDone.toString = function () { + return _onDone2.toString(); + }; + + return onDone; + }(function () { + if (--remaining === 0) { + onDone(); + } + }) + }); + } + } else if (srcStat.isFile()) { + onFresh(); + actions.link.push({ + src, + dest, + removeDest: destExists + }); + onDone(); + } else { + throw new Error(`unsure how to copy this: ${src}`); + } + }); + + return function build(_x10) { + return _ref13.apply(this, arguments); + }; + })(); + + const artifactFiles = new Set(events.artifactFiles || []); + const files = new Set(); + + // initialise events + for (var _iterator7 = queue, _isArray7 = Array.isArray(_iterator7), _i7 = 0, _iterator7 = _isArray7 ? 
_iterator7 : _iterator7[Symbol.iterator]();;) { + var _ref10; + + if (_isArray7) { + if (_i7 >= _iterator7.length) break; + _ref10 = _iterator7[_i7++]; + } else { + _i7 = _iterator7.next(); + if (_i7.done) break; + _ref10 = _i7.value; + } + + const item = _ref10; + + const onDone = item.onDone || noop; + item.onDone = function () { + events.onProgress(item.dest); + onDone(); + }; + } + events.onStart(queue.length); + + // start building actions + const actions = { + file: [], + symlink: [], + link: [] + }; + + // custom concurrency logic as we're always executing stacks of CONCURRENT_QUEUE_ITEMS queue items + // at a time due to the requirement to push items onto the queue + while (queue.length) { + const items = queue.splice(0, CONCURRENT_QUEUE_ITEMS); + yield Promise.all(items.map(build)); + } + + // simulate the existence of some files to prevent considering them extraneous + for (var _iterator8 = artifactFiles, _isArray8 = Array.isArray(_iterator8), _i8 = 0, _iterator8 = _isArray8 ? _iterator8 : _iterator8[Symbol.iterator]();;) { + var _ref11; + + if (_isArray8) { + if (_i8 >= _iterator8.length) break; + _ref11 = _iterator8[_i8++]; + } else { + _i8 = _iterator8.next(); + if (_i8.done) break; + _ref11 = _i8.value; + } + + const file = _ref11; + + if (possibleExtraneous.has(file)) { + reporter.verbose(reporter.lang('verboseFilePhantomExtraneous', file)); + possibleExtraneous.delete(file); + } + } + + for (var _iterator9 = possibleExtraneous, _isArray9 = Array.isArray(_iterator9), _i9 = 0, _iterator9 = _isArray9 ? 
_iterator9 : _iterator9[Symbol.iterator]();;) { + var _ref12; + + if (_isArray9) { + if (_i9 >= _iterator9.length) break; + _ref12 = _iterator9[_i9++]; + } else { + _i9 = _iterator9.next(); + if (_i9.done) break; + _ref12 = _i9.value; + } + + const loc = _ref12; + + if (files.has(loc.toLowerCase())) { + possibleExtraneous.delete(loc); + } + } + + return actions; + }); + + return function buildActionsForHardlink(_x6, _x7, _x8, _x9) { + return _ref9.apply(this, arguments); + }; +})(); + +let copyBulk = exports.copyBulk = (() => { + var _ref17 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, reporter, _events) { + const events = { + onStart: _events && _events.onStart || noop, + onProgress: _events && _events.onProgress || noop, + possibleExtraneous: _events ? _events.possibleExtraneous : new Set(), + ignoreBasenames: _events && _events.ignoreBasenames || [], + artifactFiles: _events && _events.artifactFiles || [] + }; + + const actions = yield buildActionsForCopy(queue, events, events.possibleExtraneous, reporter); + events.onStart(actions.file.length + actions.symlink.length + actions.link.length); + + const fileActions = actions.file; + + const currentlyWriting = new Map(); + + yield (_promise || _load_promise()).queue(fileActions, (() => { + var _ref18 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { + let writePromise; + while (writePromise = currentlyWriting.get(data.dest)) { + yield writePromise; + } + + reporter.verbose(reporter.lang('verboseFileCopy', data.src, data.dest)); + const copier = (0, (_fsNormalized || _load_fsNormalized()).copyFile)(data, function () { + return currentlyWriting.delete(data.dest); + }); + currentlyWriting.set(data.dest, copier); + events.onProgress(data.dest); + return copier; + }); + + return function (_x14) { + return _ref18.apply(this, arguments); + }; + })(), CONCURRENT_QUEUE_ITEMS); + + // we need to copy symlinks last as they could reference files we were 
copying + const symlinkActions = actions.symlink; + yield (_promise || _load_promise()).queue(symlinkActions, function (data) { + const linkname = (_path || _load_path()).default.resolve((_path || _load_path()).default.dirname(data.dest), data.linkname); + reporter.verbose(reporter.lang('verboseFileSymlink', data.dest, linkname)); + return symlink(linkname, data.dest); + }); + }); + + return function copyBulk(_x11, _x12, _x13) { + return _ref17.apply(this, arguments); + }; +})(); + +let hardlinkBulk = exports.hardlinkBulk = (() => { + var _ref19 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, reporter, _events) { + const events = { + onStart: _events && _events.onStart || noop, + onProgress: _events && _events.onProgress || noop, + possibleExtraneous: _events ? _events.possibleExtraneous : new Set(), + artifactFiles: _events && _events.artifactFiles || [], + ignoreBasenames: [] + }; + + const actions = yield buildActionsForHardlink(queue, events, events.possibleExtraneous, reporter); + events.onStart(actions.file.length + actions.symlink.length + actions.link.length); + + const fileActions = actions.link; + + yield (_promise || _load_promise()).queue(fileActions, (() => { + var _ref20 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { + reporter.verbose(reporter.lang('verboseFileLink', data.src, data.dest)); + if (data.removeDest) { + yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(data.dest); + } + yield link(data.src, data.dest); + }); + + return function (_x18) { + return _ref20.apply(this, arguments); + }; + })(), CONCURRENT_QUEUE_ITEMS); + + // we need to copy symlinks last as they could reference files we were copying + const symlinkActions = actions.symlink; + yield (_promise || _load_promise()).queue(symlinkActions, function (data) { + const linkname = (_path || _load_path()).default.resolve((_path || _load_path()).default.dirname(data.dest), data.linkname); + 
reporter.verbose(reporter.lang('verboseFileSymlink', data.dest, linkname)); + return symlink(linkname, data.dest); + }); + }); + + return function hardlinkBulk(_x15, _x16, _x17) { + return _ref19.apply(this, arguments); + }; +})(); + +let readFileAny = exports.readFileAny = (() => { + var _ref21 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (files) { + for (var _iterator13 = files, _isArray13 = Array.isArray(_iterator13), _i13 = 0, _iterator13 = _isArray13 ? _iterator13 : _iterator13[Symbol.iterator]();;) { + var _ref22; + + if (_isArray13) { + if (_i13 >= _iterator13.length) break; + _ref22 = _iterator13[_i13++]; + } else { + _i13 = _iterator13.next(); + if (_i13.done) break; + _ref22 = _i13.value; + } + + const file = _ref22; + + if (yield exists(file)) { + return readFile(file); + } + } + return null; + }); + + return function readFileAny(_x19) { + return _ref21.apply(this, arguments); + }; +})(); + +let readJson = exports.readJson = (() => { + var _ref23 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (loc) { + return (yield readJsonAndFile(loc)).object; + }); + + return function readJson(_x20) { + return _ref23.apply(this, arguments); + }; +})(); + +let readJsonAndFile = exports.readJsonAndFile = (() => { + var _ref24 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (loc) { + const file = yield readFile(loc); + try { + return { + object: (0, (_map || _load_map()).default)(JSON.parse(stripBOM(file))), + content: file + }; + } catch (err) { + err.message = `${loc}: ${err.message}`; + throw err; + } + }); + + return function readJsonAndFile(_x21) { + return _ref24.apply(this, arguments); + }; +})(); + +let find = exports.find = (() => { + var _ref25 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (filename, dir) { + const parts = dir.split((_path || _load_path()).default.sep); + + while (parts.length) { + const loc = parts.concat(filename).join((_path || 
_load_path()).default.sep); + + if (yield exists(loc)) { + return loc; + } else { + parts.pop(); + } + } + + return false; + }); + + return function find(_x22, _x23) { + return _ref25.apply(this, arguments); + }; +})(); + +let symlink = exports.symlink = (() => { + var _ref26 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (src, dest) { + if (process.platform !== 'win32') { + // use relative paths otherwise which will be retained if the directory is moved + src = (_path || _load_path()).default.relative((_path || _load_path()).default.dirname(dest), src); + // When path.relative returns an empty string for the current directory, we should instead use + // '.', which is a valid fs.symlink target. + src = src || '.'; + } + + try { + const stats = yield lstat(dest); + if (stats.isSymbolicLink()) { + const resolved = dest; + if (resolved === src) { + return; + } + } + } catch (err) { + if (err.code !== 'ENOENT') { + throw err; + } + } + + // We use rimraf for unlink which never throws an ENOENT on missing target + yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(dest); + + if (process.platform === 'win32') { + // use directory junctions if possible on win32, this requires absolute paths + yield fsSymlink(src, dest, 'junction'); + } else { + yield fsSymlink(src, dest); + } + }); + + return function symlink(_x24, _x25) { + return _ref26.apply(this, arguments); + }; +})(); + +let walk = exports.walk = (() => { + var _ref27 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (dir, relativeDir, ignoreBasenames = new Set()) { + let files = []; + + let filenames = yield readdir(dir); + if (ignoreBasenames.size) { + filenames = filenames.filter(function (name) { + return !ignoreBasenames.has(name); + }); + } + + for (var _iterator14 = filenames, _isArray14 = Array.isArray(_iterator14), _i14 = 0, _iterator14 = _isArray14 ? 
_iterator14 : _iterator14[Symbol.iterator]();;) { + var _ref28; + + if (_isArray14) { + if (_i14 >= _iterator14.length) break; + _ref28 = _iterator14[_i14++]; + } else { + _i14 = _iterator14.next(); + if (_i14.done) break; + _ref28 = _i14.value; + } + + const name = _ref28; + + const relative = relativeDir ? (_path || _load_path()).default.join(relativeDir, name) : name; + const loc = (_path || _load_path()).default.join(dir, name); + const stat = yield lstat(loc); + + files.push({ + relative, + basename: name, + absolute: loc, + mtime: +stat.mtime + }); + + if (stat.isDirectory()) { + files = files.concat((yield walk(loc, relative, ignoreBasenames))); + } + } + + return files; + }); + + return function walk(_x26, _x27) { + return _ref27.apply(this, arguments); + }; +})(); + +let getFileSizeOnDisk = exports.getFileSizeOnDisk = (() => { + var _ref29 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (loc) { + const stat = yield lstat(loc); + const size = stat.size, + blockSize = stat.blksize; + + + return Math.ceil(size / blockSize) * blockSize; + }); + + return function getFileSizeOnDisk(_x28) { + return _ref29.apply(this, arguments); + }; +})(); + +let getEolFromFile = (() => { + var _ref30 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (path) { + if (!(yield exists(path))) { + return undefined; + } + + const buffer = yield readFileBuffer(path); + + for (let i = 0; i < buffer.length; ++i) { + if (buffer[i] === cr) { + return '\r\n'; + } + if (buffer[i] === lf) { + return '\n'; + } + } + return undefined; + }); + + return function getEolFromFile(_x29) { + return _ref30.apply(this, arguments); + }; +})(); + +let writeFilePreservingEol = exports.writeFilePreservingEol = (() => { + var _ref31 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (path, data) { + const eol = (yield getEolFromFile(path)) || (_os || _load_os()).default.EOL; + if (eol !== '\n') { + data = data.replace(/\n/g, eol); + } + 
yield writeFile(path, data); + }); + + return function writeFilePreservingEol(_x30, _x31) { + return _ref31.apply(this, arguments); + }; +})(); + +let hardlinksWork = exports.hardlinksWork = (() => { + var _ref32 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (dir) { + const filename = 'test-file' + Math.random(); + const file = (_path || _load_path()).default.join(dir, filename); + const fileLink = (_path || _load_path()).default.join(dir, filename + '-link'); + try { + yield writeFile(file, 'test'); + yield link(file, fileLink); + } catch (err) { + return false; + } finally { + yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(file); + yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(fileLink); + } + return true; + }); + + return function hardlinksWork(_x32) { + return _ref32.apply(this, arguments); + }; +})(); + +// not a strict polyfill for Node's fs.mkdtemp + + +let makeTempDir = exports.makeTempDir = (() => { + var _ref33 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (prefix) { + const dir = (_path || _load_path()).default.join((_os || _load_os()).default.tmpdir(), `yarn-${prefix || ''}-${Date.now()}-${Math.random()}`); + yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(dir); + yield mkdirp(dir); + return dir; + }); + + return function makeTempDir(_x33) { + return _ref33.apply(this, arguments); + }; +})(); + +let readFirstAvailableStream = exports.readFirstAvailableStream = (() => { + var _ref34 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (paths) { + for (var _iterator15 = paths, _isArray15 = Array.isArray(_iterator15), _i15 = 0, _iterator15 = _isArray15 ? 
_iterator15 : _iterator15[Symbol.iterator]();;) { + var _ref35; + + if (_isArray15) { + if (_i15 >= _iterator15.length) break; + _ref35 = _iterator15[_i15++]; + } else { + _i15 = _iterator15.next(); + if (_i15.done) break; + _ref35 = _i15.value; + } + + const path = _ref35; + + try { + const fd = yield open(path, 'r'); + return (_fs || _load_fs()).default.createReadStream(path, { fd }); + } catch (err) { + // Try the next one + } + } + return null; + }); + + return function readFirstAvailableStream(_x34) { + return _ref34.apply(this, arguments); + }; +})(); + +let getFirstSuitableFolder = exports.getFirstSuitableFolder = (() => { + var _ref36 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (paths, mode = constants.W_OK | constants.X_OK) { + const result = { + skipped: [], + folder: null + }; + + for (var _iterator16 = paths, _isArray16 = Array.isArray(_iterator16), _i16 = 0, _iterator16 = _isArray16 ? _iterator16 : _iterator16[Symbol.iterator]();;) { + var _ref37; + + if (_isArray16) { + if (_i16 >= _iterator16.length) break; + _ref37 = _iterator16[_i16++]; + } else { + _i16 = _iterator16.next(); + if (_i16.done) break; + _ref37 = _i16.value; + } + + const folder = _ref37; + + try { + yield mkdirp(folder); + yield access(folder, mode); + + result.folder = folder; + + return result; + } catch (error) { + result.skipped.push({ + error, + folder + }); + } + } + return result; + }); + + return function getFirstSuitableFolder(_x35) { + return _ref36.apply(this, arguments); + }; +})(); + +exports.copy = copy; +exports.readFile = readFile; +exports.readFileRaw = readFileRaw; +exports.normalizeOS = normalizeOS; + +var _fs; + +function _load_fs() { + return _fs = _interopRequireDefault(__webpack_require__(5)); +} + +var _glob; + +function _load_glob() { + return _glob = _interopRequireDefault(__webpack_require__(99)); +} + +var _os; + +function _load_os() { + return _os = _interopRequireDefault(__webpack_require__(49)); +} + +var _path; + +function 
_load_path() { + return _path = _interopRequireDefault(__webpack_require__(0)); +} + +var _blockingQueue; + +function _load_blockingQueue() { + return _blockingQueue = _interopRequireDefault(__webpack_require__(110)); +} + +var _promise; + +function _load_promise() { + return _promise = _interopRequireWildcard(__webpack_require__(50)); +} + +var _promise2; + +function _load_promise2() { + return _promise2 = __webpack_require__(50); +} + +var _map; + +function _load_map() { + return _map = _interopRequireDefault(__webpack_require__(29)); +} + +var _fsNormalized; + +function _load_fsNormalized() { + return _fsNormalized = __webpack_require__(218); +} + +function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } + +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } + +const constants = exports.constants = typeof (_fs || _load_fs()).default.constants !== 'undefined' ? 
(_fs || _load_fs()).default.constants : { + R_OK: (_fs || _load_fs()).default.R_OK, + W_OK: (_fs || _load_fs()).default.W_OK, + X_OK: (_fs || _load_fs()).default.X_OK +}; + +const lockQueue = exports.lockQueue = new (_blockingQueue || _load_blockingQueue()).default('fs lock'); + +const readFileBuffer = exports.readFileBuffer = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.readFile); +const open = exports.open = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.open); +const writeFile = exports.writeFile = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.writeFile); +const readlink = exports.readlink = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.readlink); +const realpath = exports.realpath = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.realpath); +const readdir = exports.readdir = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.readdir); +const rename = exports.rename = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.rename); +const access = exports.access = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.access); +const stat = exports.stat = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.stat); +const mkdirp = exports.mkdirp = (0, (_promise2 || _load_promise2()).promisify)(__webpack_require__(145)); +const exists = exports.exists = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.exists, true); +const lstat = exports.lstat = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.lstat); +const chmod = exports.chmod = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.chmod); +const link = exports.link = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.link); +const glob = exports.glob = (0, (_promise2 || 
_load_promise2()).promisify)((_glob || _load_glob()).default); +exports.unlink = (_fsNormalized || _load_fsNormalized()).unlink; + +// fs.copyFile uses the native file copying instructions on the system, performing much better +// than any JS-based solution and consumes fewer resources. Repeated testing to fine tune the +// concurrency level revealed 128 as the sweet spot on a quad-core, 16 CPU Intel system with SSD. + +const CONCURRENT_QUEUE_ITEMS = (_fs || _load_fs()).default.copyFile ? 128 : 4; + +const fsSymlink = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.symlink); +const invariant = __webpack_require__(9); +const stripBOM = __webpack_require__(160); + +const noop = () => {}; + +function copy(src, dest, reporter) { + return copyBulk([{ src, dest }], reporter); +} + +function _readFile(loc, encoding) { + return new Promise((resolve, reject) => { + (_fs || _load_fs()).default.readFile(loc, encoding, function (err, content) { + if (err) { + reject(err); + } else { + resolve(content); + } + }); + }); +} + +function readFile(loc) { + return _readFile(loc, 'utf8').then(normalizeOS); +} + +function readFileRaw(loc) { + return _readFile(loc, 'binary'); +} + +function normalizeOS(body) { + return body.replace(/\r\n/g, '\n'); +} + +const cr = '\r'.charCodeAt(0); +const lf = '\n'.charCodeAt(0); + +/***/ }), +/* 5 */ +/***/ (function(module, exports) { + +module.exports = require("fs"); + +/***/ }), +/* 6 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +Object.defineProperty(exports, "__esModule", { + value: true +}); +class MessageError extends Error { + constructor(msg, code) { + super(msg); + this.code = code; + } + +} + +exports.MessageError = MessageError; +class ProcessSpawnError extends MessageError { + constructor(msg, code, process) { + super(msg, code); + this.process = process; + } + +} + +exports.ProcessSpawnError = ProcessSpawnError; +class SecurityError extends MessageError {} + 
+exports.SecurityError = SecurityError; +class ProcessTermError extends MessageError {} + +exports.ProcessTermError = ProcessTermError; +class ResponseError extends Error { + constructor(msg, responseCode) { + super(msg); + this.responseCode = responseCode; + } + +} + +exports.ResponseError = ResponseError; +class OneTimePasswordError extends Error {} +exports.OneTimePasswordError = OneTimePasswordError; + +/***/ }), +/* 7 */ +/***/ (function(module, __webpack_exports__, __webpack_require__) { + +"use strict"; +/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Subscriber; }); +/* unused harmony export SafeSubscriber */ +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_tslib__ = __webpack_require__(1); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__util_isFunction__ = __webpack_require__(154); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__Observer__ = __webpack_require__(420); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__Subscription__ = __webpack_require__(25); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__internal_symbol_rxSubscriber__ = __webpack_require__(321); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__config__ = __webpack_require__(185); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__util_hostReportError__ = __webpack_require__(323); +/** PURE_IMPORTS_START tslib,_util_isFunction,_Observer,_Subscription,_internal_symbol_rxSubscriber,_config,_util_hostReportError PURE_IMPORTS_END */ + + + + + + + +var Subscriber = /*@__PURE__*/ (function (_super) { + __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](Subscriber, _super); + function Subscriber(destinationOrNext, error, complete) { + var _this = _super.call(this) || this; + _this.syncErrorValue = null; + _this.syncErrorThrown = false; + _this.syncErrorThrowable = false; + _this.isStopped = false; + _this._parentSubscription = null; + switch (arguments.length) { + case 0: + _this.destination = 
__WEBPACK_IMPORTED_MODULE_2__Observer__["a" /* empty */]; + break; + case 1: + if (!destinationOrNext) { + _this.destination = __WEBPACK_IMPORTED_MODULE_2__Observer__["a" /* empty */]; + break; + } + if (typeof destinationOrNext === 'object') { + if (destinationOrNext instanceof Subscriber) { + _this.syncErrorThrowable = destinationOrNext.syncErrorThrowable; + _this.destination = destinationOrNext; + destinationOrNext.add(_this); + } + else { + _this.syncErrorThrowable = true; + _this.destination = new SafeSubscriber(_this, destinationOrNext); + } + break; + } + default: + _this.syncErrorThrowable = true; + _this.destination = new SafeSubscriber(_this, destinationOrNext, error, complete); + break; + } + return _this; + } + Subscriber.prototype[__WEBPACK_IMPORTED_MODULE_4__internal_symbol_rxSubscriber__["a" /* rxSubscriber */]] = function () { return this; }; + Subscriber.create = function (next, error, complete) { + var subscriber = new Subscriber(next, error, complete); + subscriber.syncErrorThrowable = false; + return subscriber; + }; + Subscriber.prototype.next = function (value) { + if (!this.isStopped) { + this._next(value); + } + }; + Subscriber.prototype.error = function (err) { + if (!this.isStopped) { + this.isStopped = true; + this._error(err); + } + }; + Subscriber.prototype.complete = function () { + if (!this.isStopped) { + this.isStopped = true; + this._complete(); + } + }; + Subscriber.prototype.unsubscribe = function () { + if (this.closed) { + return; + } + this.isStopped = true; + _super.prototype.unsubscribe.call(this); + }; + Subscriber.prototype._next = function (value) { + this.destination.next(value); + }; + Subscriber.prototype._error = function (err) { + this.destination.error(err); + this.unsubscribe(); + }; + Subscriber.prototype._complete = function () { + this.destination.complete(); + this.unsubscribe(); + }; + Subscriber.prototype._unsubscribeAndRecycle = function () { + var _a = this, _parent = _a._parent, _parents = _a._parents; + 
this._parent = null; + this._parents = null; + this.unsubscribe(); + this.closed = false; + this.isStopped = false; + this._parent = _parent; + this._parents = _parents; + this._parentSubscription = null; + return this; + }; + return Subscriber; +}(__WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */])); + +var SafeSubscriber = /*@__PURE__*/ (function (_super) { + __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](SafeSubscriber, _super); + function SafeSubscriber(_parentSubscriber, observerOrNext, error, complete) { + var _this = _super.call(this) || this; + _this._parentSubscriber = _parentSubscriber; + var next; + var context = _this; + if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_isFunction__["a" /* isFunction */])(observerOrNext)) { + next = observerOrNext; + } + else if (observerOrNext) { + next = observerOrNext.next; + error = observerOrNext.error; + complete = observerOrNext.complete; + if (observerOrNext !== __WEBPACK_IMPORTED_MODULE_2__Observer__["a" /* empty */]) { + context = Object.create(observerOrNext); + if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_isFunction__["a" /* isFunction */])(context.unsubscribe)) { + _this.add(context.unsubscribe.bind(context)); + } + context.unsubscribe = _this.unsubscribe.bind(_this); + } + } + _this._context = context; + _this._next = next; + _this._error = error; + _this._complete = complete; + return _this; + } + SafeSubscriber.prototype.next = function (value) { + if (!this.isStopped && this._next) { + var _parentSubscriber = this._parentSubscriber; + if (!__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling || !_parentSubscriber.syncErrorThrowable) { + this.__tryOrUnsub(this._next, value); + } + else if (this.__tryOrSetError(_parentSubscriber, this._next, value)) { + this.unsubscribe(); + } + } + }; + SafeSubscriber.prototype.error = function (err) { + if (!this.isStopped) { + var _parentSubscriber = this._parentSubscriber; + 
var useDeprecatedSynchronousErrorHandling = __WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling; + if (this._error) { + if (!useDeprecatedSynchronousErrorHandling || !_parentSubscriber.syncErrorThrowable) { + this.__tryOrUnsub(this._error, err); + this.unsubscribe(); + } + else { + this.__tryOrSetError(_parentSubscriber, this._error, err); + this.unsubscribe(); + } + } + else if (!_parentSubscriber.syncErrorThrowable) { + this.unsubscribe(); + if (useDeprecatedSynchronousErrorHandling) { + throw err; + } + __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); + } + else { + if (useDeprecatedSynchronousErrorHandling) { + _parentSubscriber.syncErrorValue = err; + _parentSubscriber.syncErrorThrown = true; + } + else { + __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); + } + this.unsubscribe(); + } + } + }; + SafeSubscriber.prototype.complete = function () { + var _this = this; + if (!this.isStopped) { + var _parentSubscriber = this._parentSubscriber; + if (this._complete) { + var wrappedComplete = function () { return _this._complete.call(_this._context); }; + if (!__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling || !_parentSubscriber.syncErrorThrowable) { + this.__tryOrUnsub(wrappedComplete); + this.unsubscribe(); + } + else { + this.__tryOrSetError(_parentSubscriber, wrappedComplete); + this.unsubscribe(); + } + } + else { + this.unsubscribe(); + } + } + }; + SafeSubscriber.prototype.__tryOrUnsub = function (fn, value) { + try { + fn.call(this._context, value); + } + catch (err) { + this.unsubscribe(); + if (__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { + throw err; + } + else { + __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); + } + } + }; + 
SafeSubscriber.prototype.__tryOrSetError = function (parent, fn, value) { + if (!__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { + throw new Error('bad call'); + } + try { + fn.call(this._context, value); + } + catch (err) { + if (__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { + parent.syncErrorValue = err; + parent.syncErrorThrown = true; + return true; + } + else { + __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); + return true; + } + } + return false; + }; + SafeSubscriber.prototype._unsubscribe = function () { + var _parentSubscriber = this._parentSubscriber; + this._context = null; + this._parentSubscriber = null; + _parentSubscriber.unsubscribe(); + }; + return SafeSubscriber; +}(Subscriber)); + +//# sourceMappingURL=Subscriber.js.map + + +/***/ }), +/* 8 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.getPathKey = getPathKey; +const os = __webpack_require__(49); +const path = __webpack_require__(0); +const userHome = __webpack_require__(67).default; + +var _require = __webpack_require__(225); + +const getCacheDir = _require.getCacheDir, + getConfigDir = _require.getConfigDir, + getDataDir = _require.getDataDir; + +const isWebpackBundle = __webpack_require__(278); + +const DEPENDENCY_TYPES = exports.DEPENDENCY_TYPES = ['devDependencies', 'dependencies', 'optionalDependencies', 'peerDependencies']; +const OWNED_DEPENDENCY_TYPES = exports.OWNED_DEPENDENCY_TYPES = ['devDependencies', 'dependencies', 'optionalDependencies']; + +const RESOLUTIONS = exports.RESOLUTIONS = 'resolutions'; +const MANIFEST_FIELDS = exports.MANIFEST_FIELDS = [RESOLUTIONS, ...DEPENDENCY_TYPES]; + +const SUPPORTED_NODE_VERSIONS = exports.SUPPORTED_NODE_VERSIONS = '^4.8.0 || ^5.7.0 || ^6.2.2 || >=8.0.0'; + +const YARN_REGISTRY 
= exports.YARN_REGISTRY = 'https://registry.yarnpkg.com'; +const NPM_REGISTRY_RE = exports.NPM_REGISTRY_RE = /https?:\/\/registry\.npmjs\.org/g; + +const YARN_DOCS = exports.YARN_DOCS = 'https://yarnpkg.com/en/docs/cli/'; +const YARN_INSTALLER_SH = exports.YARN_INSTALLER_SH = 'https://yarnpkg.com/install.sh'; +const YARN_INSTALLER_MSI = exports.YARN_INSTALLER_MSI = 'https://yarnpkg.com/latest.msi'; + +const SELF_UPDATE_VERSION_URL = exports.SELF_UPDATE_VERSION_URL = 'https://yarnpkg.com/latest-version'; + +// cache version, bump whenever we make backwards incompatible changes +const CACHE_VERSION = exports.CACHE_VERSION = 6; + +// lockfile version, bump whenever we make backwards incompatible changes +const LOCKFILE_VERSION = exports.LOCKFILE_VERSION = 1; + +// max amount of network requests to perform concurrently +const NETWORK_CONCURRENCY = exports.NETWORK_CONCURRENCY = 8; + +// HTTP timeout used when downloading packages +const NETWORK_TIMEOUT = exports.NETWORK_TIMEOUT = 30 * 1000; // in milliseconds + +// max amount of child processes to execute concurrently +const CHILD_CONCURRENCY = exports.CHILD_CONCURRENCY = 5; + +const REQUIRED_PACKAGE_KEYS = exports.REQUIRED_PACKAGE_KEYS = ['name', 'version', '_uid']; + +function getPreferredCacheDirectories() { + const preferredCacheDirectories = [getCacheDir()]; + + if (process.getuid) { + // $FlowFixMe: process.getuid exists, dammit + preferredCacheDirectories.push(path.join(os.tmpdir(), `.yarn-cache-${process.getuid()}`)); + } + + preferredCacheDirectories.push(path.join(os.tmpdir(), `.yarn-cache`)); + + return preferredCacheDirectories; +} + +const PREFERRED_MODULE_CACHE_DIRECTORIES = exports.PREFERRED_MODULE_CACHE_DIRECTORIES = getPreferredCacheDirectories(); +const CONFIG_DIRECTORY = exports.CONFIG_DIRECTORY = getConfigDir(); +const DATA_DIRECTORY = exports.DATA_DIRECTORY = getDataDir(); +const LINK_REGISTRY_DIRECTORY = exports.LINK_REGISTRY_DIRECTORY = path.join(DATA_DIRECTORY, 'link'); +const 
GLOBAL_MODULE_DIRECTORY = exports.GLOBAL_MODULE_DIRECTORY = path.join(DATA_DIRECTORY, 'global'); + +const NODE_BIN_PATH = exports.NODE_BIN_PATH = process.execPath; +const YARN_BIN_PATH = exports.YARN_BIN_PATH = getYarnBinPath(); + +// Webpack needs to be configured with node.__dirname/__filename = false +function getYarnBinPath() { + if (isWebpackBundle) { + return __filename; + } else { + return path.join(__dirname, '..', 'bin', 'yarn.js'); + } +} + +const NODE_MODULES_FOLDER = exports.NODE_MODULES_FOLDER = 'node_modules'; +const NODE_PACKAGE_JSON = exports.NODE_PACKAGE_JSON = 'package.json'; + +const PNP_FILENAME = exports.PNP_FILENAME = '.pnp.js'; + +const POSIX_GLOBAL_PREFIX = exports.POSIX_GLOBAL_PREFIX = `${process.env.DESTDIR || ''}/usr/local`; +const FALLBACK_GLOBAL_PREFIX = exports.FALLBACK_GLOBAL_PREFIX = path.join(userHome, '.yarn'); + +const META_FOLDER = exports.META_FOLDER = '.yarn-meta'; +const INTEGRITY_FILENAME = exports.INTEGRITY_FILENAME = '.yarn-integrity'; +const LOCKFILE_FILENAME = exports.LOCKFILE_FILENAME = 'yarn.lock'; +const METADATA_FILENAME = exports.METADATA_FILENAME = '.yarn-metadata.json'; +const TARBALL_FILENAME = exports.TARBALL_FILENAME = '.yarn-tarball.tgz'; +const CLEAN_FILENAME = exports.CLEAN_FILENAME = '.yarnclean'; + +const NPM_LOCK_FILENAME = exports.NPM_LOCK_FILENAME = 'package-lock.json'; +const NPM_SHRINKWRAP_FILENAME = exports.NPM_SHRINKWRAP_FILENAME = 'npm-shrinkwrap.json'; + +const DEFAULT_INDENT = exports.DEFAULT_INDENT = ' '; +const SINGLE_INSTANCE_PORT = exports.SINGLE_INSTANCE_PORT = 31997; +const SINGLE_INSTANCE_FILENAME = exports.SINGLE_INSTANCE_FILENAME = '.yarn-single-instance'; + +const ENV_PATH_KEY = exports.ENV_PATH_KEY = getPathKey(process.platform, process.env); + +function getPathKey(platform, env) { + let pathKey = 'PATH'; + + // windows calls its path "Path" usually, but this is not guaranteed. 
+ if (platform === 'win32') { + pathKey = 'Path'; + + for (const key in env) { + if (key.toLowerCase() === 'path') { + pathKey = key; + } + } + } + + return pathKey; +} + +const VERSION_COLOR_SCHEME = exports.VERSION_COLOR_SCHEME = { + major: 'red', + premajor: 'red', + minor: 'yellow', + preminor: 'yellow', + patch: 'green', + prepatch: 'green', + prerelease: 'red', + unchanged: 'white', + unknown: 'red' +}; + +/***/ }), +/* 9 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; +/** + * Copyright (c) 2013-present, Facebook, Inc. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + + + +/** + * Use invariant() to assert state which your program assumes to be true. + * + * Provide sprintf-style format (only %s is supported) and arguments + * to provide information about what broke and what you were + * expecting. + * + * The invariant message will be stripped in production, but the invariant + * will remain to ensure logic does not differ in production. + */ + +var NODE_ENV = process.env.NODE_ENV; + +var invariant = function(condition, format, a, b, c, d, e, f) { + if (NODE_ENV !== 'production') { + if (format === undefined) { + throw new Error('invariant requires an error message argument'); + } + } + + if (!condition) { + var error; + if (format === undefined) { + error = new Error( + 'Minified exception occurred; use the non-minified dev environment ' + + 'for the full error message and additional helpful warnings.' 
+ ); + } else { + var args = [a, b, c, d, e, f]; + var argIndex = 0; + error = new Error( + format.replace(/%s/g, function() { return args[argIndex++]; }) + ); + error.name = 'Invariant Violation'; + } + + error.framesToPop = 1; // we don't care about invariant's own frame + throw error; + } +}; + +module.exports = invariant; + + +/***/ }), +/* 10 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +var YAMLException = __webpack_require__(54); + +var TYPE_CONSTRUCTOR_OPTIONS = [ + 'kind', + 'resolve', + 'construct', + 'instanceOf', + 'predicate', + 'represent', + 'defaultStyle', + 'styleAliases' +]; + +var YAML_NODE_KINDS = [ + 'scalar', + 'sequence', + 'mapping' +]; + +function compileStyleAliases(map) { + var result = {}; + + if (map !== null) { + Object.keys(map).forEach(function (style) { + map[style].forEach(function (alias) { + result[String(alias)] = style; + }); + }); + } + + return result; +} + +function Type(tag, options) { + options = options || {}; + + Object.keys(options).forEach(function (name) { + if (TYPE_CONSTRUCTOR_OPTIONS.indexOf(name) === -1) { + throw new YAMLException('Unknown option "' + name + '" is met in definition of "' + tag + '" YAML type.'); + } + }); + + // TODO: Add tag format check. 
+ this.tag = tag; + this.kind = options['kind'] || null; + this.resolve = options['resolve'] || function () { return true; }; + this.construct = options['construct'] || function (data) { return data; }; + this.instanceOf = options['instanceOf'] || null; + this.predicate = options['predicate'] || null; + this.represent = options['represent'] || null; + this.defaultStyle = options['defaultStyle'] || null; + this.styleAliases = compileStyleAliases(options['styleAliases'] || null); + + if (YAML_NODE_KINDS.indexOf(this.kind) === -1) { + throw new YAMLException('Unknown kind "' + this.kind + '" is specified for "' + tag + '" YAML type.'); + } +} + +module.exports = Type; + + +/***/ }), +/* 11 */ +/***/ (function(module, exports) { + +module.exports = require("crypto"); + +/***/ }), +/* 12 */ +/***/ (function(module, __webpack_exports__, __webpack_require__) { + +"use strict"; +/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Observable; }); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__util_canReportError__ = __webpack_require__(322); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__util_toSubscriber__ = __webpack_require__(932); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__internal_symbol_observable__ = __webpack_require__(117); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__util_pipe__ = __webpack_require__(324); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__config__ = __webpack_require__(185); +/** PURE_IMPORTS_START _util_canReportError,_util_toSubscriber,_internal_symbol_observable,_util_pipe,_config PURE_IMPORTS_END */ + + + + + +var Observable = /*@__PURE__*/ (function () { + function Observable(subscribe) { + this._isScalar = false; + if (subscribe) { + this._subscribe = subscribe; + } + } + Observable.prototype.lift = function (operator) { + var observable = new Observable(); + observable.source = this; + observable.operator = operator; + return observable; + }; + 
Observable.prototype.subscribe = function (observerOrNext, error, complete) { + var operator = this.operator; + var sink = __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_toSubscriber__["a" /* toSubscriber */])(observerOrNext, error, complete); + if (operator) { + operator.call(sink, this.source); + } + else { + sink.add(this.source || (__WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].useDeprecatedSynchronousErrorHandling && !sink.syncErrorThrowable) ? + this._subscribe(sink) : + this._trySubscribe(sink)); + } + if (__WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { + if (sink.syncErrorThrowable) { + sink.syncErrorThrowable = false; + if (sink.syncErrorThrown) { + throw sink.syncErrorValue; + } + } + } + return sink; + }; + Observable.prototype._trySubscribe = function (sink) { + try { + return this._subscribe(sink); + } + catch (err) { + if (__WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { + sink.syncErrorThrown = true; + sink.syncErrorValue = err; + } + if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_0__util_canReportError__["a" /* canReportError */])(sink)) { + sink.error(err); + } + else { + console.warn(err); + } + } + }; + Observable.prototype.forEach = function (next, promiseCtor) { + var _this = this; + promiseCtor = getPromiseCtor(promiseCtor); + return new promiseCtor(function (resolve, reject) { + var subscription; + subscription = _this.subscribe(function (value) { + try { + next(value); + } + catch (err) { + reject(err); + if (subscription) { + subscription.unsubscribe(); + } + } + }, reject, resolve); + }); + }; + Observable.prototype._subscribe = function (subscriber) { + var source = this.source; + return source && source.subscribe(subscriber); + }; + Observable.prototype[__WEBPACK_IMPORTED_MODULE_2__internal_symbol_observable__["a" /* observable */]] = function () { + return this; + }; + Observable.prototype.pipe = function () { + var 
operations = []; + for (var _i = 0; _i < arguments.length; _i++) { + operations[_i] = arguments[_i]; + } + if (operations.length === 0) { + return this; + } + return __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_3__util_pipe__["b" /* pipeFromArray */])(operations)(this); + }; + Observable.prototype.toPromise = function (promiseCtor) { + var _this = this; + promiseCtor = getPromiseCtor(promiseCtor); + return new promiseCtor(function (resolve, reject) { + var value; + _this.subscribe(function (x) { return value = x; }, function (err) { return reject(err); }, function () { return resolve(value); }); + }); + }; + Observable.create = function (subscribe) { + return new Observable(subscribe); + }; + return Observable; +}()); + +function getPromiseCtor(promiseCtor) { + if (!promiseCtor) { + promiseCtor = __WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].Promise || Promise; + } + if (!promiseCtor) { + throw new Error('no Promise impl found'); + } + return promiseCtor; +} +//# sourceMappingURL=Observable.js.map + + +/***/ }), +/* 13 */ +/***/ (function(module, __webpack_exports__, __webpack_require__) { + +"use strict"; +/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return OuterSubscriber; }); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_tslib__ = __webpack_require__(1); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__Subscriber__ = __webpack_require__(7); +/** PURE_IMPORTS_START tslib,_Subscriber PURE_IMPORTS_END */ + + +var OuterSubscriber = /*@__PURE__*/ (function (_super) { + __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](OuterSubscriber, _super); + function OuterSubscriber() { + return _super !== null && _super.apply(this, arguments) || this; + } + OuterSubscriber.prototype.notifyNext = function (outerValue, innerValue, outerIndex, innerIndex, innerSub) { + this.destination.next(innerValue); + }; + OuterSubscriber.prototype.notifyError = function (error, innerSub) { + 
this.destination.error(error); + }; + OuterSubscriber.prototype.notifyComplete = function (innerSub) { + this.destination.complete(); + }; + return OuterSubscriber; +}(__WEBPACK_IMPORTED_MODULE_1__Subscriber__["a" /* Subscriber */])); + +//# sourceMappingURL=OuterSubscriber.js.map + + +/***/ }), +/* 14 */ +/***/ (function(module, __webpack_exports__, __webpack_require__) { + +"use strict"; +/* harmony export (immutable) */ __webpack_exports__["a"] = subscribeToResult; +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__InnerSubscriber__ = __webpack_require__(84); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__subscribeTo__ = __webpack_require__(446); +/** PURE_IMPORTS_START _InnerSubscriber,_subscribeTo PURE_IMPORTS_END */ + + +function subscribeToResult(outerSubscriber, result, outerValue, outerIndex, destination) { + if (destination === void 0) { + destination = new __WEBPACK_IMPORTED_MODULE_0__InnerSubscriber__["a" /* InnerSubscriber */](outerSubscriber, outerValue, outerIndex); + } + if (destination.closed) { + return; + } + return __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__subscribeTo__["a" /* subscribeTo */])(result)(destination); +} +//# sourceMappingURL=subscribeToResult.js.map + + +/***/ }), +/* 15 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; +/* eslint-disable node/no-deprecated-api */ + + + +var buffer = __webpack_require__(64) +var Buffer = buffer.Buffer + +var safer = {} + +var key + +for (key in buffer) { + if (!buffer.hasOwnProperty(key)) continue + if (key === 'SlowBuffer' || key === 'Buffer') continue + safer[key] = buffer[key] +} + +var Safer = safer.Buffer = {} +for (key in Buffer) { + if (!Buffer.hasOwnProperty(key)) continue + if (key === 'allocUnsafe' || key === 'allocUnsafeSlow') continue + Safer[key] = Buffer[key] +} + +safer.Buffer.prototype = Buffer.prototype + +if (!Safer.from || Safer.from === Uint8Array.from) { + Safer.from = function (value, encodingOrOffset, length) { + if (typeof 
value === 'number') { + throw new TypeError('The "value" argument must not be of type number. Received type ' + typeof value) + } + if (value && typeof value.length === 'undefined') { + throw new TypeError('The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type ' + typeof value) + } + return Buffer(value, encodingOrOffset, length) + } +} + +if (!Safer.alloc) { + Safer.alloc = function (size, fill, encoding) { + if (typeof size !== 'number') { + throw new TypeError('The "size" argument must be of type number. Received type ' + typeof size) + } + if (size < 0 || size >= 2 * (1 << 30)) { + throw new RangeError('The value "' + size + '" is invalid for option "size"') + } + var buf = Buffer(size) + if (!fill || fill.length === 0) { + buf.fill(0) + } else if (typeof encoding === 'string') { + buf.fill(fill, encoding) + } else { + buf.fill(fill) + } + return buf + } +} + +if (!safer.kStringMaxLength) { + try { + safer.kStringMaxLength = process.binding('buffer').kStringMaxLength + } catch (e) { + // we can't determine kStringMaxLength in environments where process.binding + // is unsupported, so let's not set it + } +} + +if (!safer.constants) { + safer.constants = { + MAX_LENGTH: safer.kMaxLength + } + if (safer.kStringMaxLength) { + safer.constants.MAX_STRING_LENGTH = safer.kStringMaxLength + } +} + +module.exports = safer + + +/***/ }), +/* 16 */ +/***/ (function(module, exports, __webpack_require__) { + +// Copyright (c) 2012, Mark Cavage. All rights reserved. +// Copyright 2015 Joyent, Inc. 
+ +var assert = __webpack_require__(28); +var Stream = __webpack_require__(23).Stream; +var util = __webpack_require__(3); + + +///--- Globals + +/* JSSTYLED */ +var UUID_REGEXP = /^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$/; + + +///--- Internal + +function _capitalize(str) { + return (str.charAt(0).toUpperCase() + str.slice(1)); +} + +function _toss(name, expected, oper, arg, actual) { + throw new assert.AssertionError({ + message: util.format('%s (%s) is required', name, expected), + actual: (actual === undefined) ? typeof (arg) : actual(arg), + expected: expected, + operator: oper || '===', + stackStartFunction: _toss.caller + }); +} + +function _getClass(arg) { + return (Object.prototype.toString.call(arg).slice(8, -1)); +} + +function noop() { + // Why even bother with asserts? +} + + +///--- Exports + +var types = { + bool: { + check: function (arg) { return typeof (arg) === 'boolean'; } + }, + func: { + check: function (arg) { return typeof (arg) === 'function'; } + }, + string: { + check: function (arg) { return typeof (arg) === 'string'; } + }, + object: { + check: function (arg) { + return typeof (arg) === 'object' && arg !== null; + } + }, + number: { + check: function (arg) { + return typeof (arg) === 'number' && !isNaN(arg); + } + }, + finite: { + check: function (arg) { + return typeof (arg) === 'number' && !isNaN(arg) && isFinite(arg); + } + }, + buffer: { + check: function (arg) { return Buffer.isBuffer(arg); }, + operator: 'Buffer.isBuffer' + }, + array: { + check: function (arg) { return Array.isArray(arg); }, + operator: 'Array.isArray' + }, + stream: { + check: function (arg) { return arg instanceof Stream; }, + operator: 'instanceof', + actual: _getClass + }, + date: { + check: function (arg) { return arg instanceof Date; }, + operator: 'instanceof', + actual: _getClass + }, + regexp: { + check: function (arg) { return arg instanceof RegExp; }, + operator: 'instanceof', + actual: _getClass + }, + uuid: { + 
check: function (arg) { + return typeof (arg) === 'string' && UUID_REGEXP.test(arg); + }, + operator: 'isUUID' + } +}; + +function _setExports(ndebug) { + var keys = Object.keys(types); + var out; + + /* re-export standard assert */ + if (process.env.NODE_NDEBUG) { + out = noop; + } else { + out = function (arg, msg) { + if (!arg) { + _toss(msg, 'true', arg); + } + }; + } + + /* standard checks */ + keys.forEach(function (k) { + if (ndebug) { + out[k] = noop; + return; + } + var type = types[k]; + out[k] = function (arg, msg) { + if (!type.check(arg)) { + _toss(msg, k, type.operator, arg, type.actual); + } + }; + }); + + /* optional checks */ + keys.forEach(function (k) { + var name = 'optional' + _capitalize(k); + if (ndebug) { + out[name] = noop; + return; + } + var type = types[k]; + out[name] = function (arg, msg) { + if (arg === undefined || arg === null) { + return; + } + if (!type.check(arg)) { + _toss(msg, k, type.operator, arg, type.actual); + } + }; + }); + + /* arrayOf checks */ + keys.forEach(function (k) { + var name = 'arrayOf' + _capitalize(k); + if (ndebug) { + out[name] = noop; + return; + } + var type = types[k]; + var expected = '[' + k + ']'; + out[name] = function (arg, msg) { + if (!Array.isArray(arg)) { + _toss(msg, expected, type.operator, arg, type.actual); + } + var i; + for (i = 0; i < arg.length; i++) { + if (!type.check(arg[i])) { + _toss(msg, expected, type.operator, arg, type.actual); + } + } + }; + }); + + /* optionalArrayOf checks */ + keys.forEach(function (k) { + var name = 'optionalArrayOf' + _capitalize(k); + if (ndebug) { + out[name] = noop; + return; + } + var type = types[k]; + var expected = '[' + k + ']'; + out[name] = function (arg, msg) { + if (arg === undefined || arg === null) { + return; + } + if (!Array.isArray(arg)) { + _toss(msg, expected, type.operator, arg, type.actual); + } + var i; + for (i = 0; i < arg.length; i++) { + if (!type.check(arg[i])) { + _toss(msg, expected, type.operator, arg, type.actual); + } + } + 
}; + }); + + /* re-export built-in assertions */ + Object.keys(assert).forEach(function (k) { + if (k === 'AssertionError') { + out[k] = assert[k]; + return; + } + if (ndebug) { + out[k] = noop; + return; + } + out[k] = assert[k]; + }); + + /* export ourselves (for unit tests _only_) */ + out._setExports = _setExports; + + return out; +} + +module.exports = _setExports(process.env.NODE_NDEBUG); + + +/***/ }), +/* 17 */ +/***/ (function(module, exports) { + +// https://github.com/zloirock/core-js/issues/86#issuecomment-115759028 +var global = module.exports = typeof window != 'undefined' && window.Math == Math + ? window : typeof self != 'undefined' && self.Math == Math ? self + // eslint-disable-next-line no-new-func + : Function('return this')(); +if (typeof __g == 'number') __g = global; // eslint-disable-line no-undef + + +/***/ }), +/* 18 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.sortAlpha = sortAlpha; +exports.sortOptionsByFlags = sortOptionsByFlags; +exports.entries = entries; +exports.removePrefix = removePrefix; +exports.removeSuffix = removeSuffix; +exports.addSuffix = addSuffix; +exports.hyphenate = hyphenate; +exports.camelCase = camelCase; +exports.compareSortedArrays = compareSortedArrays; +exports.sleep = sleep; +const _camelCase = __webpack_require__(230); + +function sortAlpha(a, b) { + // sort alphabetically in a deterministic way + const shortLen = Math.min(a.length, b.length); + for (let i = 0; i < shortLen; i++) { + const aChar = a.charCodeAt(i); + const bChar = b.charCodeAt(i); + if (aChar !== bChar) { + return aChar - bChar; + } + } + return a.length - b.length; +} + +function sortOptionsByFlags(a, b) { + const aOpt = a.flags.replace(/-/g, ''); + const bOpt = b.flags.replace(/-/g, ''); + return sortAlpha(aOpt, bOpt); +} + +function entries(obj) { + const entries = []; + if (obj) { + for (const key in obj) { + entries.push([key, 
obj[key]]); + } + } + return entries; +} + +function removePrefix(pattern, prefix) { + if (pattern.startsWith(prefix)) { + pattern = pattern.slice(prefix.length); + } + + return pattern; +} + +function removeSuffix(pattern, suffix) { + if (pattern.endsWith(suffix)) { + return pattern.slice(0, -suffix.length); + } + + return pattern; +} + +function addSuffix(pattern, suffix) { + if (!pattern.endsWith(suffix)) { + return pattern + suffix; + } + + return pattern; +} + +function hyphenate(str) { + return str.replace(/[A-Z]/g, match => { + return '-' + match.charAt(0).toLowerCase(); + }); +} + +function camelCase(str) { + if (/[A-Z]/.test(str)) { + return null; + } else { + return _camelCase(str); + } +} + +function compareSortedArrays(array1, array2) { + if (array1.length !== array2.length) { + return false; + } + for (let i = 0, len = array1.length; i < len; i++) { + if (array1[i] !== array2[i]) { + return false; + } + } + return true; +} + +function sleep(ms) { + return new Promise(resolve => { + setTimeout(resolve, ms); + }); +} + +/***/ }), +/* 19 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.stringify = exports.parse = undefined; + +var _asyncToGenerator2; + +function _load_asyncToGenerator() { + return _asyncToGenerator2 = _interopRequireDefault(__webpack_require__(2)); +} + +var _parse; + +function _load_parse() { + return _parse = __webpack_require__(105); +} + +Object.defineProperty(exports, 'parse', { + enumerable: true, + get: function get() { + return _interopRequireDefault(_parse || _load_parse()).default; + } +}); + +var _stringify; + +function _load_stringify() { + return _stringify = __webpack_require__(199); +} + +Object.defineProperty(exports, 'stringify', { + enumerable: true, + get: function get() { + return _interopRequireDefault(_stringify || _load_stringify()).default; + } +}); +exports.implodeEntry = implodeEntry; +exports.explodeEntry 
= explodeEntry; + +var _misc; + +function _load_misc() { + return _misc = __webpack_require__(18); +} + +var _normalizePattern; + +function _load_normalizePattern() { + return _normalizePattern = __webpack_require__(37); +} + +var _parse2; + +function _load_parse2() { + return _parse2 = _interopRequireDefault(__webpack_require__(105)); +} + +var _constants; + +function _load_constants() { + return _constants = __webpack_require__(8); +} + +var _fs; + +function _load_fs() { + return _fs = _interopRequireWildcard(__webpack_require__(4)); +} + +function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } + +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } + +const invariant = __webpack_require__(9); + +const path = __webpack_require__(0); +const ssri = __webpack_require__(65); + +function getName(pattern) { + return (0, (_normalizePattern || _load_normalizePattern()).normalizePattern)(pattern).name; +} + +function blankObjectUndefined(obj) { + return obj && Object.keys(obj).length ? obj : undefined; +} + +function keyForRemote(remote) { + return remote.resolved || (remote.reference && remote.hash ? `${remote.reference}#${remote.hash}` : null); +} + +function serializeIntegrity(integrity) { + // We need this because `Integrity.toString()` does not use sorting to ensure a stable string output + // See https://git.io/vx2Hy + return integrity.toString().split(' ').sort().join(' '); +} + +function implodeEntry(pattern, obj) { + const inferredName = getName(pattern); + const integrity = obj.integrity ? serializeIntegrity(obj.integrity) : ''; + const imploded = { + name: inferredName === obj.name ? undefined : obj.name, + version: obj.version, + uid: obj.uid === obj.version ? 
undefined : obj.uid, + resolved: obj.resolved, + registry: obj.registry === 'npm' ? undefined : obj.registry, + dependencies: blankObjectUndefined(obj.dependencies), + optionalDependencies: blankObjectUndefined(obj.optionalDependencies), + permissions: blankObjectUndefined(obj.permissions), + prebuiltVariants: blankObjectUndefined(obj.prebuiltVariants) + }; + if (integrity) { + imploded.integrity = integrity; + } + return imploded; +} + +function explodeEntry(pattern, obj) { + obj.optionalDependencies = obj.optionalDependencies || {}; + obj.dependencies = obj.dependencies || {}; + obj.uid = obj.uid || obj.version; + obj.permissions = obj.permissions || {}; + obj.registry = obj.registry || 'npm'; + obj.name = obj.name || getName(pattern); + const integrity = obj.integrity; + if (integrity && integrity.isIntegrity) { + obj.integrity = ssri.parse(integrity); + } + return obj; +} + +class Lockfile { + constructor({ cache, source, parseResultType } = {}) { + this.source = source || ''; + this.cache = cache; + this.parseResultType = parseResultType; + } + + // source string if the `cache` was parsed + + + // if true, we're parsing an old yarn file and need to update integrity fields + hasEntriesExistWithoutIntegrity() { + if (!this.cache) { + return false; + } + + for (const key in this.cache) { + // $FlowFixMe - `this.cache` is clearly defined at this point + if (!/^.*@(file:|http)/.test(key) && this.cache[key] && !this.cache[key].integrity) { + return true; + } + } + + return false; + } + + static fromDirectory(dir, reporter) { + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + // read the manifest in this directory + const lockfileLoc = path.join(dir, (_constants || _load_constants()).LOCKFILE_FILENAME); + + let lockfile; + let rawLockfile = ''; + let parseResult; + + if (yield (_fs || _load_fs()).exists(lockfileLoc)) { + rawLockfile = yield (_fs || _load_fs()).readFile(lockfileLoc); + parseResult = (0, (_parse2 || 
_load_parse2()).default)(rawLockfile, lockfileLoc); + + if (reporter) { + if (parseResult.type === 'merge') { + reporter.info(reporter.lang('lockfileMerged')); + } else if (parseResult.type === 'conflict') { + reporter.warn(reporter.lang('lockfileConflict')); + } + } + + lockfile = parseResult.object; + } else if (reporter) { + reporter.info(reporter.lang('noLockfileFound')); + } + + if (lockfile && lockfile.__metadata) { + const lockfilev2 = lockfile; + lockfile = {}; + } + + return new Lockfile({ cache: lockfile, source: rawLockfile, parseResultType: parseResult && parseResult.type }); + })(); + } + + getLocked(pattern) { + const cache = this.cache; + if (!cache) { + return undefined; + } + + const shrunk = pattern in cache && cache[pattern]; + + if (typeof shrunk === 'string') { + return this.getLocked(shrunk); + } else if (shrunk) { + explodeEntry(pattern, shrunk); + return shrunk; + } + + return undefined; + } + + removePattern(pattern) { + const cache = this.cache; + if (!cache) { + return; + } + delete cache[pattern]; + } + + getLockfile(patterns) { + const lockfile = {}; + const seen = new Map(); + + // order by name so that lockfile manifest is assigned to the first dependency with this manifest + // the others that have the same remoteKey will just refer to the first + // ordering allows for consistency in lockfile when it is serialized + const sortedPatternsKeys = Object.keys(patterns).sort((_misc || _load_misc()).sortAlpha); + + for (var _iterator = sortedPatternsKeys, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? 
_iterator : _iterator[Symbol.iterator]();;) { + var _ref; + + if (_isArray) { + if (_i >= _iterator.length) break; + _ref = _iterator[_i++]; + } else { + _i = _iterator.next(); + if (_i.done) break; + _ref = _i.value; + } + + const pattern = _ref; + + const pkg = patterns[pattern]; + const remote = pkg._remote, + ref = pkg._reference; + + invariant(ref, 'Package is missing a reference'); + invariant(remote, 'Package is missing a remote'); + + const remoteKey = keyForRemote(remote); + const seenPattern = remoteKey && seen.get(remoteKey); + if (seenPattern) { + // no point in duplicating it + lockfile[pattern] = seenPattern; + + // if we're relying on our name being inferred and two of the patterns have + // different inferred names then we need to set it + if (!seenPattern.name && getName(pattern) !== pkg.name) { + seenPattern.name = pkg.name; + } + continue; + } + const obj = implodeEntry(pattern, { + name: pkg.name, + version: pkg.version, + uid: pkg._uid, + resolved: remote.resolved, + integrity: remote.integrity, + registry: remote.registry, + dependencies: pkg.dependencies, + peerDependencies: pkg.peerDependencies, + optionalDependencies: pkg.optionalDependencies, + permissions: ref.permissions, + prebuiltVariants: pkg.prebuiltVariants + }); + + lockfile[pattern] = obj; + + if (remoteKey) { + seen.set(remoteKey, obj); + } + } + + return lockfile; + } +} +exports.default = Lockfile; + +/***/ }), +/* 20 */ +/***/ (function(module, exports, __webpack_require__) { + +var store = __webpack_require__(133)('wks'); +var uid = __webpack_require__(137); +var Symbol = __webpack_require__(17).Symbol; +var USE_SYMBOL = typeof Symbol == 'function'; + +var $exports = module.exports = function (name) { + return store[name] || (store[name] = + USE_SYMBOL && Symbol[name] || (USE_SYMBOL ? Symbol : uid)('Symbol.' 
+ name)); +}; + +$exports.store = store; + + +/***/ }), +/* 21 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +exports.__esModule = true; + +var _assign = __webpack_require__(591); + +var _assign2 = _interopRequireDefault(_assign); + +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } + +exports.default = _assign2.default || function (target) { + for (var i = 1; i < arguments.length; i++) { + var source = arguments[i]; + + for (var key in source) { + if (Object.prototype.hasOwnProperty.call(source, key)) { + target[key] = source[key]; + } + } + } + + return target; +}; + +/***/ }), +/* 22 */ +/***/ (function(module, exports) { + +exports = module.exports = SemVer; + +// The debug function is excluded entirely from the minified version. +/* nomin */ var debug; +/* nomin */ if (typeof process === 'object' && + /* nomin */ process.env && + /* nomin */ process.env.NODE_DEBUG && + /* nomin */ /\bsemver\b/i.test(process.env.NODE_DEBUG)) + /* nomin */ debug = function() { + /* nomin */ var args = Array.prototype.slice.call(arguments, 0); + /* nomin */ args.unshift('SEMVER'); + /* nomin */ console.log.apply(console, args); + /* nomin */ }; +/* nomin */ else + /* nomin */ debug = function() {}; + +// Note: this is the semver.org version of the spec that it implements +// Not necessarily the package version of this code. +exports.SEMVER_SPEC_VERSION = '2.0.0'; + +var MAX_LENGTH = 256; +var MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || 9007199254740991; + +// Max safe segment length for coercion. +var MAX_SAFE_COMPONENT_LENGTH = 16; + +// The actual regexps go on exports.re +var re = exports.re = []; +var src = exports.src = []; +var R = 0; + +// The following Regular Expressions can be used for tokenizing, +// validating, and parsing SemVer version strings. + +// ## Numeric Identifier +// A single `0`, or a non-zero digit followed by zero or more digits. 
+ +var NUMERICIDENTIFIER = R++; +src[NUMERICIDENTIFIER] = '0|[1-9]\\d*'; +var NUMERICIDENTIFIERLOOSE = R++; +src[NUMERICIDENTIFIERLOOSE] = '[0-9]+'; + + +// ## Non-numeric Identifier +// Zero or more digits, followed by a letter or hyphen, and then zero or +// more letters, digits, or hyphens. + +var NONNUMERICIDENTIFIER = R++; +src[NONNUMERICIDENTIFIER] = '\\d*[a-zA-Z-][a-zA-Z0-9-]*'; + + +// ## Main Version +// Three dot-separated numeric identifiers. + +var MAINVERSION = R++; +src[MAINVERSION] = '(' + src[NUMERICIDENTIFIER] + ')\\.' + + '(' + src[NUMERICIDENTIFIER] + ')\\.' + + '(' + src[NUMERICIDENTIFIER] + ')'; + +var MAINVERSIONLOOSE = R++; +src[MAINVERSIONLOOSE] = '(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' + + '(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' + + '(' + src[NUMERICIDENTIFIERLOOSE] + ')'; + +// ## Pre-release Version Identifier +// A numeric identifier, or a non-numeric identifier. + +var PRERELEASEIDENTIFIER = R++; +src[PRERELEASEIDENTIFIER] = '(?:' + src[NUMERICIDENTIFIER] + + '|' + src[NONNUMERICIDENTIFIER] + ')'; + +var PRERELEASEIDENTIFIERLOOSE = R++; +src[PRERELEASEIDENTIFIERLOOSE] = '(?:' + src[NUMERICIDENTIFIERLOOSE] + + '|' + src[NONNUMERICIDENTIFIER] + ')'; + + +// ## Pre-release Version +// Hyphen, followed by one or more dot-separated pre-release version +// identifiers. + +var PRERELEASE = R++; +src[PRERELEASE] = '(?:-(' + src[PRERELEASEIDENTIFIER] + + '(?:\\.' + src[PRERELEASEIDENTIFIER] + ')*))'; + +var PRERELEASELOOSE = R++; +src[PRERELEASELOOSE] = '(?:-?(' + src[PRERELEASEIDENTIFIERLOOSE] + + '(?:\\.' + src[PRERELEASEIDENTIFIERLOOSE] + ')*))'; + +// ## Build Metadata Identifier +// Any combination of digits, letters, or hyphens. + +var BUILDIDENTIFIER = R++; +src[BUILDIDENTIFIER] = '[0-9A-Za-z-]+'; + +// ## Build Metadata +// Plus sign, followed by one or more period-separated build metadata +// identifiers. + +var BUILD = R++; +src[BUILD] = '(?:\\+(' + src[BUILDIDENTIFIER] + + '(?:\\.' 
+ src[BUILDIDENTIFIER] + ')*))'; + + +// ## Full Version String +// A main version, followed optionally by a pre-release version and +// build metadata. + +// Note that the only major, minor, patch, and pre-release sections of +// the version string are capturing groups. The build metadata is not a +// capturing group, because it should not ever be used in version +// comparison. + +var FULL = R++; +var FULLPLAIN = 'v?' + src[MAINVERSION] + + src[PRERELEASE] + '?' + + src[BUILD] + '?'; + +src[FULL] = '^' + FULLPLAIN + '$'; + +// like full, but allows v1.2.3 and =1.2.3, which people do sometimes. +// also, 1.0.0alpha1 (prerelease without the hyphen) which is pretty +// common in the npm registry. +var LOOSEPLAIN = '[v=\\s]*' + src[MAINVERSIONLOOSE] + + src[PRERELEASELOOSE] + '?' + + src[BUILD] + '?'; + +var LOOSE = R++; +src[LOOSE] = '^' + LOOSEPLAIN + '$'; + +var GTLT = R++; +src[GTLT] = '((?:<|>)?=?)'; + +// Something like "2.*" or "1.2.x". +// Note that "x.x" is a valid xRange identifer, meaning "any version" +// Only the first item is strictly required. +var XRANGEIDENTIFIERLOOSE = R++; +src[XRANGEIDENTIFIERLOOSE] = src[NUMERICIDENTIFIERLOOSE] + '|x|X|\\*'; +var XRANGEIDENTIFIER = R++; +src[XRANGEIDENTIFIER] = src[NUMERICIDENTIFIER] + '|x|X|\\*'; + +var XRANGEPLAIN = R++; +src[XRANGEPLAIN] = '[v=\\s]*(' + src[XRANGEIDENTIFIER] + ')' + + '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + + '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + + '(?:' + src[PRERELEASE] + ')?' + + src[BUILD] + '?' + + ')?)?'; + +var XRANGEPLAINLOOSE = R++; +src[XRANGEPLAINLOOSE] = '[v=\\s]*(' + src[XRANGEIDENTIFIERLOOSE] + ')' + + '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + + '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + + '(?:' + src[PRERELEASELOOSE] + ')?' + + src[BUILD] + '?' + + ')?)?'; + +var XRANGE = R++; +src[XRANGE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAIN] + '$'; +var XRANGELOOSE = R++; +src[XRANGELOOSE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAINLOOSE] + '$'; + +// Coercion. 
+// Extract anything that could conceivably be a part of a valid semver +var COERCE = R++; +src[COERCE] = '(?:^|[^\\d])' + + '(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '})' + + '(?:\\.(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '}))?' + + '(?:\\.(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '}))?' + + '(?:$|[^\\d])'; + +// Tilde ranges. +// Meaning is "reasonably at or greater than" +var LONETILDE = R++; +src[LONETILDE] = '(?:~>?)'; + +var TILDETRIM = R++; +src[TILDETRIM] = '(\\s*)' + src[LONETILDE] + '\\s+'; +re[TILDETRIM] = new RegExp(src[TILDETRIM], 'g'); +var tildeTrimReplace = '$1~'; + +var TILDE = R++; +src[TILDE] = '^' + src[LONETILDE] + src[XRANGEPLAIN] + '$'; +var TILDELOOSE = R++; +src[TILDELOOSE] = '^' + src[LONETILDE] + src[XRANGEPLAINLOOSE] + '$'; + +// Caret ranges. +// Meaning is "at least and backwards compatible with" +var LONECARET = R++; +src[LONECARET] = '(?:\\^)'; + +var CARETTRIM = R++; +src[CARETTRIM] = '(\\s*)' + src[LONECARET] + '\\s+'; +re[CARETTRIM] = new RegExp(src[CARETTRIM], 'g'); +var caretTrimReplace = '$1^'; + +var CARET = R++; +src[CARET] = '^' + src[LONECARET] + src[XRANGEPLAIN] + '$'; +var CARETLOOSE = R++; +src[CARETLOOSE] = '^' + src[LONECARET] + src[XRANGEPLAINLOOSE] + '$'; + +// A simple gt/lt/eq thing, or just "" to indicate "any version" +var COMPARATORLOOSE = R++; +src[COMPARATORLOOSE] = '^' + src[GTLT] + '\\s*(' + LOOSEPLAIN + ')$|^$'; +var COMPARATOR = R++; +src[COMPARATOR] = '^' + src[GTLT] + '\\s*(' + FULLPLAIN + ')$|^$'; + + +// An expression to strip any whitespace between the gtlt and the thing +// it modifies, so that `> 1.2.3` ==> `>1.2.3` +var COMPARATORTRIM = R++; +src[COMPARATORTRIM] = '(\\s*)' + src[GTLT] + + '\\s*(' + LOOSEPLAIN + '|' + src[XRANGEPLAIN] + ')'; + +// this one has to use the /g flag +re[COMPARATORTRIM] = new RegExp(src[COMPARATORTRIM], 'g'); +var comparatorTrimReplace = '$1$2$3'; + + +// Something like `1.2.3 - 1.2.4` +// Note that these all use the loose form, because they'll be +// checked against either 
the strict or loose comparator form +// later. +var HYPHENRANGE = R++; +src[HYPHENRANGE] = '^\\s*(' + src[XRANGEPLAIN] + ')' + + '\\s+-\\s+' + + '(' + src[XRANGEPLAIN] + ')' + + '\\s*$'; + +var HYPHENRANGELOOSE = R++; +src[HYPHENRANGELOOSE] = '^\\s*(' + src[XRANGEPLAINLOOSE] + ')' + + '\\s+-\\s+' + + '(' + src[XRANGEPLAINLOOSE] + ')' + + '\\s*$'; + +// Star ranges basically just allow anything at all. +var STAR = R++; +src[STAR] = '(<|>)?=?\\s*\\*'; + +// Compile to actual regexp objects. +// All are flag-free, unless they were created above with a flag. +for (var i = 0; i < R; i++) { + debug(i, src[i]); + if (!re[i]) + re[i] = new RegExp(src[i]); +} + +exports.parse = parse; +function parse(version, loose) { + if (version instanceof SemVer) + return version; + + if (typeof version !== 'string') + return null; + + if (version.length > MAX_LENGTH) + return null; + + var r = loose ? re[LOOSE] : re[FULL]; + if (!r.test(version)) + return null; + + try { + return new SemVer(version, loose); + } catch (er) { + return null; + } +} + +exports.valid = valid; +function valid(version, loose) { + var v = parse(version, loose); + return v ? v.version : null; +} + + +exports.clean = clean; +function clean(version, loose) { + var s = parse(version.trim().replace(/^[=v]+/, ''), loose); + return s ? s.version : null; +} + +exports.SemVer = SemVer; + +function SemVer(version, loose) { + if (version instanceof SemVer) { + if (version.loose === loose) + return version; + else + version = version.version; + } else if (typeof version !== 'string') { + throw new TypeError('Invalid Version: ' + version); + } + + if (version.length > MAX_LENGTH) + throw new TypeError('version is longer than ' + MAX_LENGTH + ' characters') + + if (!(this instanceof SemVer)) + return new SemVer(version, loose); + + debug('SemVer', version, loose); + this.loose = loose; + var m = version.trim().match(loose ? 
re[LOOSE] : re[FULL]); + + if (!m) + throw new TypeError('Invalid Version: ' + version); + + this.raw = version; + + // these are actually numbers + this.major = +m[1]; + this.minor = +m[2]; + this.patch = +m[3]; + + if (this.major > MAX_SAFE_INTEGER || this.major < 0) + throw new TypeError('Invalid major version') + + if (this.minor > MAX_SAFE_INTEGER || this.minor < 0) + throw new TypeError('Invalid minor version') + + if (this.patch > MAX_SAFE_INTEGER || this.patch < 0) + throw new TypeError('Invalid patch version') + + // numberify any prerelease numeric ids + if (!m[4]) + this.prerelease = []; + else + this.prerelease = m[4].split('.').map(function(id) { + if (/^[0-9]+$/.test(id)) { + var num = +id; + if (num >= 0 && num < MAX_SAFE_INTEGER) + return num; + } + return id; + }); + + this.build = m[5] ? m[5].split('.') : []; + this.format(); +} + +SemVer.prototype.format = function() { + this.version = this.major + '.' + this.minor + '.' + this.patch; + if (this.prerelease.length) + this.version += '-' + this.prerelease.join('.'); + return this.version; +}; + +SemVer.prototype.toString = function() { + return this.version; +}; + +SemVer.prototype.compare = function(other) { + debug('SemVer.compare', this.version, this.loose, other); + if (!(other instanceof SemVer)) + other = new SemVer(other, this.loose); + + return this.compareMain(other) || this.comparePre(other); +}; + +SemVer.prototype.compareMain = function(other) { + if (!(other instanceof SemVer)) + other = new SemVer(other, this.loose); + + return compareIdentifiers(this.major, other.major) || + compareIdentifiers(this.minor, other.minor) || + compareIdentifiers(this.patch, other.patch); +}; + +SemVer.prototype.comparePre = function(other) { + if (!(other instanceof SemVer)) + other = new SemVer(other, this.loose); + + // NOT having a prerelease is > having one + if (this.prerelease.length && !other.prerelease.length) + return -1; + else if (!this.prerelease.length && other.prerelease.length) + return 1; 
+ else if (!this.prerelease.length && !other.prerelease.length) + return 0; + + var i = 0; + do { + var a = this.prerelease[i]; + var b = other.prerelease[i]; + debug('prerelease compare', i, a, b); + if (a === undefined && b === undefined) + return 0; + else if (b === undefined) + return 1; + else if (a === undefined) + return -1; + else if (a === b) + continue; + else + return compareIdentifiers(a, b); + } while (++i); +}; + +// preminor will bump the version up to the next minor release, and immediately +// down to pre-release. premajor and prepatch work the same way. +SemVer.prototype.inc = function(release, identifier) { + switch (release) { + case 'premajor': + this.prerelease.length = 0; + this.patch = 0; + this.minor = 0; + this.major++; + this.inc('pre', identifier); + break; + case 'preminor': + this.prerelease.length = 0; + this.patch = 0; + this.minor++; + this.inc('pre', identifier); + break; + case 'prepatch': + // If this is already a prerelease, it will bump to the next version + // drop any prereleases that might already exist, since they are not + // relevant at this point. + this.prerelease.length = 0; + this.inc('patch', identifier); + this.inc('pre', identifier); + break; + // If the input is a non-prerelease version, this acts the same as + // prepatch. + case 'prerelease': + if (this.prerelease.length === 0) + this.inc('patch', identifier); + this.inc('pre', identifier); + break; + + case 'major': + // If this is a pre-major version, bump up to the same major version. + // Otherwise increment major. + // 1.0.0-5 bumps to 1.0.0 + // 1.1.0 bumps to 2.0.0 + if (this.minor !== 0 || this.patch !== 0 || this.prerelease.length === 0) + this.major++; + this.minor = 0; + this.patch = 0; + this.prerelease = []; + break; + case 'minor': + // If this is a pre-minor version, bump up to the same minor version. + // Otherwise increment minor. 
+ // 1.2.0-5 bumps to 1.2.0 + // 1.2.1 bumps to 1.3.0 + if (this.patch !== 0 || this.prerelease.length === 0) + this.minor++; + this.patch = 0; + this.prerelease = []; + break; + case 'patch': + // If this is not a pre-release version, it will increment the patch. + // If it is a pre-release it will bump up to the same patch version. + // 1.2.0-5 patches to 1.2.0 + // 1.2.0 patches to 1.2.1 + if (this.prerelease.length === 0) + this.patch++; + this.prerelease = []; + break; + // This probably shouldn't be used publicly. + // 1.0.0 "pre" would become 1.0.0-0 which is the wrong direction. + case 'pre': + if (this.prerelease.length === 0) + this.prerelease = [0]; + else { + var i = this.prerelease.length; + while (--i >= 0) { + if (typeof this.prerelease[i] === 'number') { + this.prerelease[i]++; + i = -2; + } + } + if (i === -1) // didn't increment anything + this.prerelease.push(0); + } + if (identifier) { + // 1.2.0-beta.1 bumps to 1.2.0-beta.2, + // 1.2.0-beta.fooblz or 1.2.0-beta bumps to 1.2.0-beta.0 + if (this.prerelease[0] === identifier) { + if (isNaN(this.prerelease[1])) + this.prerelease = [identifier, 0]; + } else + this.prerelease = [identifier, 0]; + } + break; + + default: + throw new Error('invalid increment argument: ' + release); + } + this.format(); + this.raw = this.version; + return this; +}; + +exports.inc = inc; +function inc(version, release, loose, identifier) { + if (typeof(loose) === 'string') { + identifier = loose; + loose = undefined; + } + + try { + return new SemVer(version, loose).inc(release, identifier).version; + } catch (er) { + return null; + } +} + +exports.diff = diff; +function diff(version1, version2) { + if (eq(version1, version2)) { + return null; + } else { + var v1 = parse(version1); + var v2 = parse(version2); + if (v1.prerelease.length || v2.prerelease.length) { + for (var key in v1) { + if (key === 'major' || key === 'minor' || key === 'patch') { + if (v1[key] !== v2[key]) { + return 'pre'+key; + } + } + } + return 
'prerelease'; + } + for (var key in v1) { + if (key === 'major' || key === 'minor' || key === 'patch') { + if (v1[key] !== v2[key]) { + return key; + } + } + } + } +} + +exports.compareIdentifiers = compareIdentifiers; + +var numeric = /^[0-9]+$/; +function compareIdentifiers(a, b) { + var anum = numeric.test(a); + var bnum = numeric.test(b); + + if (anum && bnum) { + a = +a; + b = +b; + } + + return (anum && !bnum) ? -1 : + (bnum && !anum) ? 1 : + a < b ? -1 : + a > b ? 1 : + 0; +} + +exports.rcompareIdentifiers = rcompareIdentifiers; +function rcompareIdentifiers(a, b) { + return compareIdentifiers(b, a); +} + +exports.major = major; +function major(a, loose) { + return new SemVer(a, loose).major; +} + +exports.minor = minor; +function minor(a, loose) { + return new SemVer(a, loose).minor; +} + +exports.patch = patch; +function patch(a, loose) { + return new SemVer(a, loose).patch; +} + +exports.compare = compare; +function compare(a, b, loose) { + return new SemVer(a, loose).compare(new SemVer(b, loose)); +} + +exports.compareLoose = compareLoose; +function compareLoose(a, b) { + return compare(a, b, true); +} + +exports.rcompare = rcompare; +function rcompare(a, b, loose) { + return compare(b, a, loose); +} + +exports.sort = sort; +function sort(list, loose) { + return list.sort(function(a, b) { + return exports.compare(a, b, loose); + }); +} + +exports.rsort = rsort; +function rsort(list, loose) { + return list.sort(function(a, b) { + return exports.rcompare(a, b, loose); + }); +} + +exports.gt = gt; +function gt(a, b, loose) { + return compare(a, b, loose) > 0; +} + +exports.lt = lt; +function lt(a, b, loose) { + return compare(a, b, loose) < 0; +} + +exports.eq = eq; +function eq(a, b, loose) { + return compare(a, b, loose) === 0; +} + +exports.neq = neq; +function neq(a, b, loose) { + return compare(a, b, loose) !== 0; +} + +exports.gte = gte; +function gte(a, b, loose) { + return compare(a, b, loose) >= 0; +} + +exports.lte = lte; +function lte(a, b, 
loose) { + return compare(a, b, loose) <= 0; +} + +exports.cmp = cmp; +function cmp(a, op, b, loose) { + var ret; + switch (op) { + case '===': + if (typeof a === 'object') a = a.version; + if (typeof b === 'object') b = b.version; + ret = a === b; + break; + case '!==': + if (typeof a === 'object') a = a.version; + if (typeof b === 'object') b = b.version; + ret = a !== b; + break; + case '': case '=': case '==': ret = eq(a, b, loose); break; + case '!=': ret = neq(a, b, loose); break; + case '>': ret = gt(a, b, loose); break; + case '>=': ret = gte(a, b, loose); break; + case '<': ret = lt(a, b, loose); break; + case '<=': ret = lte(a, b, loose); break; + default: throw new TypeError('Invalid operator: ' + op); + } + return ret; +} + +exports.Comparator = Comparator; +function Comparator(comp, loose) { + if (comp instanceof Comparator) { + if (comp.loose === loose) + return comp; + else + comp = comp.value; + } + + if (!(this instanceof Comparator)) + return new Comparator(comp, loose); + + debug('comparator', comp, loose); + this.loose = loose; + this.parse(comp); + + if (this.semver === ANY) + this.value = ''; + else + this.value = this.operator + this.semver.version; + + debug('comp', this); +} + +var ANY = {}; +Comparator.prototype.parse = function(comp) { + var r = this.loose ? re[COMPARATORLOOSE] : re[COMPARATOR]; + var m = comp.match(r); + + if (!m) + throw new TypeError('Invalid comparator: ' + comp); + + this.operator = m[1]; + if (this.operator === '=') + this.operator = ''; + + // if it literally is just '>' or '' then allow anything. 
+ if (!m[2]) + this.semver = ANY; + else + this.semver = new SemVer(m[2], this.loose); +}; + +Comparator.prototype.toString = function() { + return this.value; +}; + +Comparator.prototype.test = function(version) { + debug('Comparator.test', version, this.loose); + + if (this.semver === ANY) + return true; + + if (typeof version === 'string') + version = new SemVer(version, this.loose); + + return cmp(version, this.operator, this.semver, this.loose); +}; + +Comparator.prototype.intersects = function(comp, loose) { + if (!(comp instanceof Comparator)) { + throw new TypeError('a Comparator is required'); + } + + var rangeTmp; + + if (this.operator === '') { + rangeTmp = new Range(comp.value, loose); + return satisfies(this.value, rangeTmp, loose); + } else if (comp.operator === '') { + rangeTmp = new Range(this.value, loose); + return satisfies(comp.semver, rangeTmp, loose); + } + + var sameDirectionIncreasing = + (this.operator === '>=' || this.operator === '>') && + (comp.operator === '>=' || comp.operator === '>'); + var sameDirectionDecreasing = + (this.operator === '<=' || this.operator === '<') && + (comp.operator === '<=' || comp.operator === '<'); + var sameSemVer = this.semver.version === comp.semver.version; + var differentDirectionsInclusive = + (this.operator === '>=' || this.operator === '<=') && + (comp.operator === '>=' || comp.operator === '<='); + var oppositeDirectionsLessThan = + cmp(this.semver, '<', comp.semver, loose) && + ((this.operator === '>=' || this.operator === '>') && + (comp.operator === '<=' || comp.operator === '<')); + var oppositeDirectionsGreaterThan = + cmp(this.semver, '>', comp.semver, loose) && + ((this.operator === '<=' || this.operator === '<') && + (comp.operator === '>=' || comp.operator === '>')); + + return sameDirectionIncreasing || sameDirectionDecreasing || + (sameSemVer && differentDirectionsInclusive) || + oppositeDirectionsLessThan || oppositeDirectionsGreaterThan; +}; + + +exports.Range = Range; +function 
Range(range, loose) { + if (range instanceof Range) { + if (range.loose === loose) { + return range; + } else { + return new Range(range.raw, loose); + } + } + + if (range instanceof Comparator) { + return new Range(range.value, loose); + } + + if (!(this instanceof Range)) + return new Range(range, loose); + + this.loose = loose; + + // First, split based on boolean or || + this.raw = range; + this.set = range.split(/\s*\|\|\s*/).map(function(range) { + return this.parseRange(range.trim()); + }, this).filter(function(c) { + // throw out any that are not relevant for whatever reason + return c.length; + }); + + if (!this.set.length) { + throw new TypeError('Invalid SemVer Range: ' + range); + } + + this.format(); +} + +Range.prototype.format = function() { + this.range = this.set.map(function(comps) { + return comps.join(' ').trim(); + }).join('||').trim(); + return this.range; +}; + +Range.prototype.toString = function() { + return this.range; +}; + +Range.prototype.parseRange = function(range) { + var loose = this.loose; + range = range.trim(); + debug('range', range, loose); + // `1.2.3 - 1.2.4` => `>=1.2.3 <=1.2.4` + var hr = loose ? re[HYPHENRANGELOOSE] : re[HYPHENRANGE]; + range = range.replace(hr, hyphenReplace); + debug('hyphen replace', range); + // `> 1.2.3 < 1.2.5` => `>1.2.3 <1.2.5` + range = range.replace(re[COMPARATORTRIM], comparatorTrimReplace); + debug('comparator trim', range, re[COMPARATORTRIM]); + + // `~ 1.2.3` => `~1.2.3` + range = range.replace(re[TILDETRIM], tildeTrimReplace); + + // `^ 1.2.3` => `^1.2.3` + range = range.replace(re[CARETTRIM], caretTrimReplace); + + // normalize spaces + range = range.split(/\s+/).join(' '); + + // At this point, the range is completely trimmed and + // ready to be split into comparators. + + var compRe = loose ? 
re[COMPARATORLOOSE] : re[COMPARATOR]; + var set = range.split(' ').map(function(comp) { + return parseComparator(comp, loose); + }).join(' ').split(/\s+/); + if (this.loose) { + // in loose mode, throw out any that are not valid comparators + set = set.filter(function(comp) { + return !!comp.match(compRe); + }); + } + set = set.map(function(comp) { + return new Comparator(comp, loose); + }); + + return set; +}; + +Range.prototype.intersects = function(range, loose) { + if (!(range instanceof Range)) { + throw new TypeError('a Range is required'); + } + + return this.set.some(function(thisComparators) { + return thisComparators.every(function(thisComparator) { + return range.set.some(function(rangeComparators) { + return rangeComparators.every(function(rangeComparator) { + return thisComparator.intersects(rangeComparator, loose); + }); + }); + }); + }); +}; + +// Mostly just for testing and legacy API reasons +exports.toComparators = toComparators; +function toComparators(range, loose) { + return new Range(range, loose).set.map(function(comp) { + return comp.map(function(c) { + return c.value; + }).join(' ').trim().split(' '); + }); +} + +// comprised of xranges, tildes, stars, and gtlt's at this point. +// already replaced the hyphen ranges +// turn into a set of JUST comparators. 
+function parseComparator(comp, loose) { + debug('comp', comp); + comp = replaceCarets(comp, loose); + debug('caret', comp); + comp = replaceTildes(comp, loose); + debug('tildes', comp); + comp = replaceXRanges(comp, loose); + debug('xrange', comp); + comp = replaceStars(comp, loose); + debug('stars', comp); + return comp; +} + +function isX(id) { + return !id || id.toLowerCase() === 'x' || id === '*'; +} + +// ~, ~> --> * (any, kinda silly) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0 <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0 <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0 <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3 <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0 <1.3.0 +function replaceTildes(comp, loose) { + return comp.trim().split(/\s+/).map(function(comp) { + return replaceTilde(comp, loose); + }).join(' '); +} + +function replaceTilde(comp, loose) { + var r = loose ? re[TILDELOOSE] : re[TILDE]; + return comp.replace(r, function(_, M, m, p, pr) { + debug('tilde', comp, _, M, m, p, pr); + var ret; + + if (isX(M)) + ret = ''; + else if (isX(m)) + ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; + else if (isX(p)) + // ~1.2 == >=1.2.0 <1.3.0 + ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; + else if (pr) { + debug('replaceTilde pr', pr); + if (pr.charAt(0) !== '-') + pr = '-' + pr; + ret = '>=' + M + '.' + m + '.' + p + pr + + ' <' + M + '.' + (+m + 1) + '.0'; + } else + // ~1.2.3 == >=1.2.3 <1.3.0 + ret = '>=' + M + '.' + m + '.' + p + + ' <' + M + '.' 
+ (+m + 1) + '.0'; + + debug('tilde return', ret); + return ret; + }); +} + +// ^ --> * (any, kinda silly) +// ^2, ^2.x, ^2.x.x --> >=2.0.0 <3.0.0 +// ^2.0, ^2.0.x --> >=2.0.0 <3.0.0 +// ^1.2, ^1.2.x --> >=1.2.0 <2.0.0 +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2.0 --> >=1.2.0 <2.0.0 +function replaceCarets(comp, loose) { + return comp.trim().split(/\s+/).map(function(comp) { + return replaceCaret(comp, loose); + }).join(' '); +} + +function replaceCaret(comp, loose) { + debug('caret', comp, loose); + var r = loose ? re[CARETLOOSE] : re[CARET]; + return comp.replace(r, function(_, M, m, p, pr) { + debug('caret', comp, _, M, m, p, pr); + var ret; + + if (isX(M)) + ret = ''; + else if (isX(m)) + ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; + else if (isX(p)) { + if (M === '0') + ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; + else + ret = '>=' + M + '.' + m + '.0 <' + (+M + 1) + '.0.0'; + } else if (pr) { + debug('replaceCaret pr', pr); + if (pr.charAt(0) !== '-') + pr = '-' + pr; + if (M === '0') { + if (m === '0') + ret = '>=' + M + '.' + m + '.' + p + pr + + ' <' + M + '.' + m + '.' + (+p + 1); + else + ret = '>=' + M + '.' + m + '.' + p + pr + + ' <' + M + '.' + (+m + 1) + '.0'; + } else + ret = '>=' + M + '.' + m + '.' + p + pr + + ' <' + (+M + 1) + '.0.0'; + } else { + debug('no pr'); + if (M === '0') { + if (m === '0') + ret = '>=' + M + '.' + m + '.' + p + + ' <' + M + '.' + m + '.' + (+p + 1); + else + ret = '>=' + M + '.' + m + '.' + p + + ' <' + M + '.' + (+m + 1) + '.0'; + } else + ret = '>=' + M + '.' + m + '.' + p + + ' <' + (+M + 1) + '.0.0'; + } + + debug('caret return', ret); + return ret; + }); +} + +function replaceXRanges(comp, loose) { + debug('replaceXRanges', comp, loose); + return comp.split(/\s+/).map(function(comp) { + return replaceXRange(comp, loose); + }).join(' '); +} + +function replaceXRange(comp, loose) { + comp = comp.trim(); + var r = loose ? 
re[XRANGELOOSE] : re[XRANGE]; + return comp.replace(r, function(ret, gtlt, M, m, p, pr) { + debug('xRange', comp, ret, gtlt, M, m, p, pr); + var xM = isX(M); + var xm = xM || isX(m); + var xp = xm || isX(p); + var anyX = xp; + + if (gtlt === '=' && anyX) + gtlt = ''; + + if (xM) { + if (gtlt === '>' || gtlt === '<') { + // nothing is allowed + ret = '<0.0.0'; + } else { + // nothing is forbidden + ret = '*'; + } + } else if (gtlt && anyX) { + // replace X with 0 + if (xm) + m = 0; + if (xp) + p = 0; + + if (gtlt === '>') { + // >1 => >=2.0.0 + // >1.2 => >=1.3.0 + // >1.2.3 => >= 1.2.4 + gtlt = '>='; + if (xm) { + M = +M + 1; + m = 0; + p = 0; + } else if (xp) { + m = +m + 1; + p = 0; + } + } else if (gtlt === '<=') { + // <=0.7.x is actually <0.8.0, since any 0.7.x should + // pass. Similarly, <=7.x is actually <8.0.0, etc. + gtlt = '<'; + if (xm) + M = +M + 1; + else + m = +m + 1; + } + + ret = gtlt + M + '.' + m + '.' + p; + } else if (xm) { + ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; + } else if (xp) { + ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; + } + + debug('xRange return', ret); + + return ret; + }); +} + +// Because * is AND-ed with everything else in the comparator, +// and '' means "any version", just remove the *s entirely. +function replaceStars(comp, loose) { + debug('replaceStars', comp, loose); + // Looseness is ignored here. star is always as loose as it gets! + return comp.trim().replace(re[STAR], ''); +} + +// This function is passed to string.replace(re[HYPHENRANGE]) +// M, m, patch, prerelease, build +// 1.2 - 3.4.5 => >=1.2.0 <=3.4.5 +// 1.2.3 - 3.4 => >=1.2.0 <3.5.0 Any 3.4.x will do +// 1.2 - 3.4 => >=1.2.0 <3.5.0 +function hyphenReplace($0, + from, fM, fm, fp, fpr, fb, + to, tM, tm, tp, tpr, tb) { + + if (isX(fM)) + from = ''; + else if (isX(fm)) + from = '>=' + fM + '.0.0'; + else if (isX(fp)) + from = '>=' + fM + '.' 
+ fm + '.0'; + else + from = '>=' + from; + + if (isX(tM)) + to = ''; + else if (isX(tm)) + to = '<' + (+tM + 1) + '.0.0'; + else if (isX(tp)) + to = '<' + tM + '.' + (+tm + 1) + '.0'; + else if (tpr) + to = '<=' + tM + '.' + tm + '.' + tp + '-' + tpr; + else + to = '<=' + to; + + return (from + ' ' + to).trim(); +} + + +// if ANY of the sets match ALL of its comparators, then pass +Range.prototype.test = function(version) { + if (!version) + return false; + + if (typeof version === 'string') + version = new SemVer(version, this.loose); + + for (var i = 0; i < this.set.length; i++) { + if (testSet(this.set[i], version)) + return true; + } + return false; +}; + +function testSet(set, version) { + for (var i = 0; i < set.length; i++) { + if (!set[i].test(version)) + return false; + } + + if (version.prerelease.length) { + // Find the set of versions that are allowed to have prereleases + // For example, ^1.2.3-pr.1 desugars to >=1.2.3-pr.1 <2.0.0 + // That should allow `1.2.3-pr.2` to pass. + // However, `1.2.4-alpha.notready` should NOT be allowed, + // even though it's within the range set by the comparators. + for (var i = 0; i < set.length; i++) { + debug(set[i].semver); + if (set[i].semver === ANY) + continue; + + if (set[i].semver.prerelease.length > 0) { + var allowed = set[i].semver; + if (allowed.major === version.major && + allowed.minor === version.minor && + allowed.patch === version.patch) + return true; + } + } + + // Version has a -pre, but it's not one of the ones we like. 
+ return false; + } + + return true; +} + +exports.satisfies = satisfies; +function satisfies(version, range, loose) { + try { + range = new Range(range, loose); + } catch (er) { + return false; + } + return range.test(version); +} + +exports.maxSatisfying = maxSatisfying; +function maxSatisfying(versions, range, loose) { + var max = null; + var maxSV = null; + try { + var rangeObj = new Range(range, loose); + } catch (er) { + return null; + } + versions.forEach(function (v) { + if (rangeObj.test(v)) { // satisfies(v, range, loose) + if (!max || maxSV.compare(v) === -1) { // compare(max, v, true) + max = v; + maxSV = new SemVer(max, loose); + } + } + }) + return max; +} + +exports.minSatisfying = minSatisfying; +function minSatisfying(versions, range, loose) { + var min = null; + var minSV = null; + try { + var rangeObj = new Range(range, loose); + } catch (er) { + return null; + } + versions.forEach(function (v) { + if (rangeObj.test(v)) { // satisfies(v, range, loose) + if (!min || minSV.compare(v) === 1) { // compare(min, v, true) + min = v; + minSV = new SemVer(min, loose); + } + } + }) + return min; +} + +exports.validRange = validRange; +function validRange(range, loose) { + try { + // Return '*' instead of '' so that truthiness works. + // This will throw if it's invalid anyway + return new Range(range, loose).range || '*'; + } catch (er) { + return null; + } +} + +// Determine if version is less than all the versions possible in the range +exports.ltr = ltr; +function ltr(version, range, loose) { + return outside(version, range, '<', loose); +} + +// Determine if version is greater than all the versions possible in the range. 
+exports.gtr = gtr; +function gtr(version, range, loose) { + return outside(version, range, '>', loose); +} + +exports.outside = outside; +function outside(version, range, hilo, loose) { + version = new SemVer(version, loose); + range = new Range(range, loose); + + var gtfn, ltefn, ltfn, comp, ecomp; + switch (hilo) { + case '>': + gtfn = gt; + ltefn = lte; + ltfn = lt; + comp = '>'; + ecomp = '>='; + break; + case '<': + gtfn = lt; + ltefn = gte; + ltfn = gt; + comp = '<'; + ecomp = '<='; + break; + default: + throw new TypeError('Must provide a hilo val of "<" or ">"'); + } + + // If it satisifes the range it is not outside + if (satisfies(version, range, loose)) { + return false; + } + + // From now on, variable terms are as if we're in "gtr" mode. + // but note that everything is flipped for the "ltr" function. + + for (var i = 0; i < range.set.length; ++i) { + var comparators = range.set[i]; + + var high = null; + var low = null; + + comparators.forEach(function(comparator) { + if (comparator.semver === ANY) { + comparator = new Comparator('>=0.0.0') + } + high = high || comparator; + low = low || comparator; + if (gtfn(comparator.semver, high.semver, loose)) { + high = comparator; + } else if (ltfn(comparator.semver, low.semver, loose)) { + low = comparator; + } + }); + + // If the edge version comparator has a operator then our version + // isn't outside it + if (high.operator === comp || high.operator === ecomp) { + return false; + } + + // If the lowest version comparator has an operator and our version + // is less than it then it isn't higher than the range + if ((!low.operator || low.operator === comp) && + ltefn(version, low.semver)) { + return false; + } else if (low.operator === ecomp && ltfn(version, low.semver)) { + return false; + } + } + return true; +} + +exports.prerelease = prerelease; +function prerelease(version, loose) { + var parsed = parse(version, loose); + return (parsed && parsed.prerelease.length) ? 
parsed.prerelease : null; +} + +exports.intersects = intersects; +function intersects(r1, r2, loose) { + r1 = new Range(r1, loose) + r2 = new Range(r2, loose) + return r1.intersects(r2) +} + +exports.coerce = coerce; +function coerce(version) { + if (version instanceof SemVer) + return version; + + if (typeof version !== 'string') + return null; + + var match = version.match(re[COERCE]); + + if (match == null) + return null; + + return parse((match[1] || '0') + '.' + (match[2] || '0') + '.' + (match[3] || '0')); +} + + +/***/ }), +/* 23 */ +/***/ (function(module, exports) { + +module.exports = require("stream"); + +/***/ }), +/* 24 */ +/***/ (function(module, exports) { + +module.exports = require("url"); + +/***/ }), +/* 25 */ +/***/ (function(module, __webpack_exports__, __webpack_require__) { + +"use strict"; +/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Subscription; }); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__util_isArray__ = __webpack_require__(41); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__util_isObject__ = __webpack_require__(444); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__util_isFunction__ = __webpack_require__(154); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__util_tryCatch__ = __webpack_require__(56); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__util_errorObject__ = __webpack_require__(47); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__ = __webpack_require__(441); +/** PURE_IMPORTS_START _util_isArray,_util_isObject,_util_isFunction,_util_tryCatch,_util_errorObject,_util_UnsubscriptionError PURE_IMPORTS_END */ + + + + + + +var Subscription = /*@__PURE__*/ (function () { + function Subscription(unsubscribe) { + this.closed = false; + this._parent = null; + this._parents = null; + this._subscriptions = null; + if (unsubscribe) { + this._unsubscribe = unsubscribe; + } + } + Subscription.prototype.unsubscribe 
= function () { + var hasErrors = false; + var errors; + if (this.closed) { + return; + } + var _a = this, _parent = _a._parent, _parents = _a._parents, _unsubscribe = _a._unsubscribe, _subscriptions = _a._subscriptions; + this.closed = true; + this._parent = null; + this._parents = null; + this._subscriptions = null; + var index = -1; + var len = _parents ? _parents.length : 0; + while (_parent) { + _parent.remove(this); + _parent = ++index < len && _parents[index] || null; + } + if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_2__util_isFunction__["a" /* isFunction */])(_unsubscribe)) { + var trial = __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_3__util_tryCatch__["a" /* tryCatch */])(_unsubscribe).call(this); + if (trial === __WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */]) { + hasErrors = true; + errors = errors || (__WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e instanceof __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */] ? 
+ flattenUnsubscriptionErrors(__WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e.errors) : [__WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e]); + } + } + if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_0__util_isArray__["a" /* isArray */])(_subscriptions)) { + index = -1; + len = _subscriptions.length; + while (++index < len) { + var sub = _subscriptions[index]; + if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_isObject__["a" /* isObject */])(sub)) { + var trial = __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_3__util_tryCatch__["a" /* tryCatch */])(sub.unsubscribe).call(sub); + if (trial === __WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */]) { + hasErrors = true; + errors = errors || []; + var err = __WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e; + if (err instanceof __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */]) { + errors = errors.concat(flattenUnsubscriptionErrors(err.errors)); + } + else { + errors.push(err); + } + } + } + } + } + if (hasErrors) { + throw new __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */](errors); + } + }; + Subscription.prototype.add = function (teardown) { + if (!teardown || (teardown === Subscription.EMPTY)) { + return Subscription.EMPTY; + } + if (teardown === this) { + return this; + } + var subscription = teardown; + switch (typeof teardown) { + case 'function': + subscription = new Subscription(teardown); + case 'object': + if (subscription.closed || typeof subscription.unsubscribe !== 'function') { + return subscription; + } + else if (this.closed) { + subscription.unsubscribe(); + return subscription; + } + else if (typeof subscription._addParent !== 'function') { + var tmp = subscription; + subscription = new Subscription(); + subscription._subscriptions = [tmp]; + } + break; + default: + throw new Error('unrecognized teardown ' + teardown + ' 
added to Subscription.'); + } + var subscriptions = this._subscriptions || (this._subscriptions = []); + subscriptions.push(subscription); + subscription._addParent(this); + return subscription; + }; + Subscription.prototype.remove = function (subscription) { + var subscriptions = this._subscriptions; + if (subscriptions) { + var subscriptionIndex = subscriptions.indexOf(subscription); + if (subscriptionIndex !== -1) { + subscriptions.splice(subscriptionIndex, 1); + } + } + }; + Subscription.prototype._addParent = function (parent) { + var _a = this, _parent = _a._parent, _parents = _a._parents; + if (!_parent || _parent === parent) { + this._parent = parent; + } + else if (!_parents) { + this._parents = [parent]; + } + else if (_parents.indexOf(parent) === -1) { + _parents.push(parent); + } + }; + Subscription.EMPTY = (function (empty) { + empty.closed = true; + return empty; + }(new Subscription())); + return Subscription; +}()); + +function flattenUnsubscriptionErrors(errors) { + return errors.reduce(function (errs, err) { return errs.concat((err instanceof __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */]) ? err.errors : err); }, []); +} +//# sourceMappingURL=Subscription.js.map + + +/***/ }), +/* 26 */ +/***/ (function(module, exports, __webpack_require__) { + +// Copyright 2015 Joyent, Inc. 
+ +module.exports = { + bufferSplit: bufferSplit, + addRSAMissing: addRSAMissing, + calculateDSAPublic: calculateDSAPublic, + calculateED25519Public: calculateED25519Public, + calculateX25519Public: calculateX25519Public, + mpNormalize: mpNormalize, + mpDenormalize: mpDenormalize, + ecNormalize: ecNormalize, + countZeros: countZeros, + assertCompatible: assertCompatible, + isCompatible: isCompatible, + opensslKeyDeriv: opensslKeyDeriv, + opensshCipherInfo: opensshCipherInfo, + publicFromPrivateECDSA: publicFromPrivateECDSA, + zeroPadToLength: zeroPadToLength, + writeBitString: writeBitString, + readBitString: readBitString +}; + +var assert = __webpack_require__(16); +var Buffer = __webpack_require__(15).Buffer; +var PrivateKey = __webpack_require__(33); +var Key = __webpack_require__(27); +var crypto = __webpack_require__(11); +var algs = __webpack_require__(32); +var asn1 = __webpack_require__(66); + +var ec, jsbn; +var nacl; + +var MAX_CLASS_DEPTH = 3; + +function isCompatible(obj, klass, needVer) { + if (obj === null || typeof (obj) !== 'object') + return (false); + if (needVer === undefined) + needVer = klass.prototype._sshpkApiVersion; + if (obj instanceof klass && + klass.prototype._sshpkApiVersion[0] == needVer[0]) + return (true); + var proto = Object.getPrototypeOf(obj); + var depth = 0; + while (proto.constructor.name !== klass.name) { + proto = Object.getPrototypeOf(proto); + if (!proto || ++depth > MAX_CLASS_DEPTH) + return (false); + } + if (proto.constructor.name !== klass.name) + return (false); + var ver = proto._sshpkApiVersion; + if (ver === undefined) + ver = klass._oldVersionDetect(obj); + if (ver[0] != needVer[0] || ver[1] < needVer[1]) + return (false); + return (true); +} + +function assertCompatible(obj, klass, needVer, name) { + if (name === undefined) + name = 'object'; + assert.ok(obj, name + ' must not be null'); + assert.object(obj, name + ' must be an object'); + if (needVer === undefined) + needVer = klass.prototype._sshpkApiVersion; 
+ if (obj instanceof klass && + klass.prototype._sshpkApiVersion[0] == needVer[0]) + return; + var proto = Object.getPrototypeOf(obj); + var depth = 0; + while (proto.constructor.name !== klass.name) { + proto = Object.getPrototypeOf(proto); + assert.ok(proto && ++depth <= MAX_CLASS_DEPTH, + name + ' must be a ' + klass.name + ' instance'); + } + assert.strictEqual(proto.constructor.name, klass.name, + name + ' must be a ' + klass.name + ' instance'); + var ver = proto._sshpkApiVersion; + if (ver === undefined) + ver = klass._oldVersionDetect(obj); + assert.ok(ver[0] == needVer[0] && ver[1] >= needVer[1], + name + ' must be compatible with ' + klass.name + ' klass ' + + 'version ' + needVer[0] + '.' + needVer[1]); +} + +var CIPHER_LEN = { + 'des-ede3-cbc': { key: 7, iv: 8 }, + 'aes-128-cbc': { key: 16, iv: 16 } +}; +var PKCS5_SALT_LEN = 8; + +function opensslKeyDeriv(cipher, salt, passphrase, count) { + assert.buffer(salt, 'salt'); + assert.buffer(passphrase, 'passphrase'); + assert.number(count, 'iteration count'); + + var clen = CIPHER_LEN[cipher]; + assert.object(clen, 'supported cipher'); + + salt = salt.slice(0, PKCS5_SALT_LEN); + + var D, D_prev, bufs; + var material = Buffer.alloc(0); + while (material.length < clen.key + clen.iv) { + bufs = []; + if (D_prev) + bufs.push(D_prev); + bufs.push(passphrase); + bufs.push(salt); + D = Buffer.concat(bufs); + for (var j = 0; j < count; ++j) + D = crypto.createHash('md5').update(D).digest(); + material = Buffer.concat([material, D]); + D_prev = D; + } + + return ({ + key: material.slice(0, clen.key), + iv: material.slice(clen.key, clen.key + clen.iv) + }); +} + +/* Count leading zero bits on a buffer */ +function countZeros(buf) { + var o = 0, obit = 8; + while (o < buf.length) { + var mask = (1 << obit); + if ((buf[o] & mask) === mask) + break; + obit--; + if (obit < 0) { + o++; + obit = 8; + } + } + return (o*8 + (8 - obit) - 1); +} + +function bufferSplit(buf, chr) { + assert.buffer(buf); + assert.string(chr); + + 
var parts = []; + var lastPart = 0; + var matches = 0; + for (var i = 0; i < buf.length; ++i) { + if (buf[i] === chr.charCodeAt(matches)) + ++matches; + else if (buf[i] === chr.charCodeAt(0)) + matches = 1; + else + matches = 0; + + if (matches >= chr.length) { + var newPart = i + 1; + parts.push(buf.slice(lastPart, newPart - matches)); + lastPart = newPart; + matches = 0; + } + } + if (lastPart <= buf.length) + parts.push(buf.slice(lastPart, buf.length)); + + return (parts); +} + +function ecNormalize(buf, addZero) { + assert.buffer(buf); + if (buf[0] === 0x00 && buf[1] === 0x04) { + if (addZero) + return (buf); + return (buf.slice(1)); + } else if (buf[0] === 0x04) { + if (!addZero) + return (buf); + } else { + while (buf[0] === 0x00) + buf = buf.slice(1); + if (buf[0] === 0x02 || buf[0] === 0x03) + throw (new Error('Compressed elliptic curve points ' + + 'are not supported')); + if (buf[0] !== 0x04) + throw (new Error('Not a valid elliptic curve point')); + if (!addZero) + return (buf); + } + var b = Buffer.alloc(buf.length + 1); + b[0] = 0x0; + buf.copy(b, 1); + return (b); +} + +function readBitString(der, tag) { + if (tag === undefined) + tag = asn1.Ber.BitString; + var buf = der.readString(tag, true); + assert.strictEqual(buf[0], 0x00, 'bit strings with unused bits are ' + + 'not supported (0x' + buf[0].toString(16) + ')'); + return (buf.slice(1)); +} + +function writeBitString(der, buf, tag) { + if (tag === undefined) + tag = asn1.Ber.BitString; + var b = Buffer.alloc(buf.length + 1); + b[0] = 0x00; + buf.copy(b, 1); + der.writeBuffer(b, tag); +} + +function mpNormalize(buf) { + assert.buffer(buf); + while (buf.length > 1 && buf[0] === 0x00 && (buf[1] & 0x80) === 0x00) + buf = buf.slice(1); + if ((buf[0] & 0x80) === 0x80) { + var b = Buffer.alloc(buf.length + 1); + b[0] = 0x00; + buf.copy(b, 1); + buf = b; + } + return (buf); +} + +function mpDenormalize(buf) { + assert.buffer(buf); + while (buf.length > 1 && buf[0] === 0x00) + buf = buf.slice(1); + return 
(buf); +} + +function zeroPadToLength(buf, len) { + assert.buffer(buf); + assert.number(len); + while (buf.length > len) { + assert.equal(buf[0], 0x00); + buf = buf.slice(1); + } + while (buf.length < len) { + var b = Buffer.alloc(buf.length + 1); + b[0] = 0x00; + buf.copy(b, 1); + buf = b; + } + return (buf); +} + +function bigintToMpBuf(bigint) { + var buf = Buffer.from(bigint.toByteArray()); + buf = mpNormalize(buf); + return (buf); +} + +function calculateDSAPublic(g, p, x) { + assert.buffer(g); + assert.buffer(p); + assert.buffer(x); + try { + var bigInt = __webpack_require__(81).BigInteger; + } catch (e) { + throw (new Error('To load a PKCS#8 format DSA private key, ' + + 'the node jsbn library is required.')); + } + g = new bigInt(g); + p = new bigInt(p); + x = new bigInt(x); + var y = g.modPow(x, p); + var ybuf = bigintToMpBuf(y); + return (ybuf); +} + +function calculateED25519Public(k) { + assert.buffer(k); + + if (nacl === undefined) + nacl = __webpack_require__(76); + + var kp = nacl.sign.keyPair.fromSeed(new Uint8Array(k)); + return (Buffer.from(kp.publicKey)); +} + +function calculateX25519Public(k) { + assert.buffer(k); + + if (nacl === undefined) + nacl = __webpack_require__(76); + + var kp = nacl.box.keyPair.fromSeed(new Uint8Array(k)); + return (Buffer.from(kp.publicKey)); +} + +function addRSAMissing(key) { + assert.object(key); + assertCompatible(key, PrivateKey, [1, 1]); + try { + var bigInt = __webpack_require__(81).BigInteger; + } catch (e) { + throw (new Error('To write a PEM private key from ' + + 'this source, the node jsbn lib is required.')); + } + + var d = new bigInt(key.part.d.data); + var buf; + + if (!key.part.dmodp) { + var p = new bigInt(key.part.p.data); + var dmodp = d.mod(p.subtract(1)); + + buf = bigintToMpBuf(dmodp); + key.part.dmodp = {name: 'dmodp', data: buf}; + key.parts.push(key.part.dmodp); + } + if (!key.part.dmodq) { + var q = new bigInt(key.part.q.data); + var dmodq = d.mod(q.subtract(1)); + + buf = 
bigintToMpBuf(dmodq); + key.part.dmodq = {name: 'dmodq', data: buf}; + key.parts.push(key.part.dmodq); + } +} + +function publicFromPrivateECDSA(curveName, priv) { + assert.string(curveName, 'curveName'); + assert.buffer(priv); + if (ec === undefined) + ec = __webpack_require__(139); + if (jsbn === undefined) + jsbn = __webpack_require__(81).BigInteger; + var params = algs.curves[curveName]; + var p = new jsbn(params.p); + var a = new jsbn(params.a); + var b = new jsbn(params.b); + var curve = new ec.ECCurveFp(p, a, b); + var G = curve.decodePointHex(params.G.toString('hex')); + + var d = new jsbn(mpNormalize(priv)); + var pub = G.multiply(d); + pub = Buffer.from(curve.encodePointHex(pub), 'hex'); + + var parts = []; + parts.push({name: 'curve', data: Buffer.from(curveName)}); + parts.push({name: 'Q', data: pub}); + + var key = new Key({type: 'ecdsa', curve: curve, parts: parts}); + return (key); +} + +function opensshCipherInfo(cipher) { + var inf = {}; + switch (cipher) { + case '3des-cbc': + inf.keySize = 24; + inf.blockSize = 8; + inf.opensslName = 'des-ede3-cbc'; + break; + case 'blowfish-cbc': + inf.keySize = 16; + inf.blockSize = 8; + inf.opensslName = 'bf-cbc'; + break; + case 'aes128-cbc': + case 'aes128-ctr': + case 'aes128-gcm@openssh.com': + inf.keySize = 16; + inf.blockSize = 16; + inf.opensslName = 'aes-128-' + cipher.slice(7, 10); + break; + case 'aes192-cbc': + case 'aes192-ctr': + case 'aes192-gcm@openssh.com': + inf.keySize = 24; + inf.blockSize = 16; + inf.opensslName = 'aes-192-' + cipher.slice(7, 10); + break; + case 'aes256-cbc': + case 'aes256-ctr': + case 'aes256-gcm@openssh.com': + inf.keySize = 32; + inf.blockSize = 16; + inf.opensslName = 'aes-256-' + cipher.slice(7, 10); + break; + default: + throw (new Error( + 'Unsupported openssl cipher "' + cipher + '"')); + } + return (inf); +} + + +/***/ }), +/* 27 */ +/***/ (function(module, exports, __webpack_require__) { + +// Copyright 2017 Joyent, Inc. 
+ +module.exports = Key; + +var assert = __webpack_require__(16); +var algs = __webpack_require__(32); +var crypto = __webpack_require__(11); +var Fingerprint = __webpack_require__(156); +var Signature = __webpack_require__(75); +var DiffieHellman = __webpack_require__(325).DiffieHellman; +var errs = __webpack_require__(74); +var utils = __webpack_require__(26); +var PrivateKey = __webpack_require__(33); +var edCompat; + +try { + edCompat = __webpack_require__(454); +} catch (e) { + /* Just continue through, and bail out if we try to use it. */ +} + +var InvalidAlgorithmError = errs.InvalidAlgorithmError; +var KeyParseError = errs.KeyParseError; + +var formats = {}; +formats['auto'] = __webpack_require__(455); +formats['pem'] = __webpack_require__(86); +formats['pkcs1'] = __webpack_require__(327); +formats['pkcs8'] = __webpack_require__(157); +formats['rfc4253'] = __webpack_require__(103); +formats['ssh'] = __webpack_require__(456); +formats['ssh-private'] = __webpack_require__(192); +formats['openssh'] = formats['ssh-private']; +formats['dnssec'] = __webpack_require__(326); + +function Key(opts) { + assert.object(opts, 'options'); + assert.arrayOfObject(opts.parts, 'options.parts'); + assert.string(opts.type, 'options.type'); + assert.optionalString(opts.comment, 'options.comment'); + + var algInfo = algs.info[opts.type]; + if (typeof (algInfo) !== 'object') + throw (new InvalidAlgorithmError(opts.type)); + + var partLookup = {}; + for (var i = 0; i < opts.parts.length; ++i) { + var part = opts.parts[i]; + partLookup[part.name] = part; + } + + this.type = opts.type; + this.parts = opts.parts; + this.part = partLookup; + this.comment = undefined; + this.source = opts.source; + + /* for speeding up hashing/fingerprint operations */ + this._rfc4253Cache = opts._rfc4253Cache; + this._hashCache = {}; + + var sz; + this.curve = undefined; + if (this.type === 'ecdsa') { + var curve = this.part.curve.data.toString(); + this.curve = curve; + sz = algs.curves[curve].size; + 
} else if (this.type === 'ed25519' || this.type === 'curve25519') { + sz = 256; + this.curve = 'curve25519'; + } else { + var szPart = this.part[algInfo.sizePart]; + sz = szPart.data.length; + sz = sz * 8 - utils.countZeros(szPart.data); + } + this.size = sz; +} + +Key.formats = formats; + +Key.prototype.toBuffer = function (format, options) { + if (format === undefined) + format = 'ssh'; + assert.string(format, 'format'); + assert.object(formats[format], 'formats[format]'); + assert.optionalObject(options, 'options'); + + if (format === 'rfc4253') { + if (this._rfc4253Cache === undefined) + this._rfc4253Cache = formats['rfc4253'].write(this); + return (this._rfc4253Cache); + } + + return (formats[format].write(this, options)); +}; + +Key.prototype.toString = function (format, options) { + return (this.toBuffer(format, options).toString()); +}; + +Key.prototype.hash = function (algo) { + assert.string(algo, 'algorithm'); + algo = algo.toLowerCase(); + if (algs.hashAlgs[algo] === undefined) + throw (new InvalidAlgorithmError(algo)); + + if (this._hashCache[algo]) + return (this._hashCache[algo]); + var hash = crypto.createHash(algo). 
+ update(this.toBuffer('rfc4253')).digest(); + this._hashCache[algo] = hash; + return (hash); +}; + +Key.prototype.fingerprint = function (algo) { + if (algo === undefined) + algo = 'sha256'; + assert.string(algo, 'algorithm'); + var opts = { + type: 'key', + hash: this.hash(algo), + algorithm: algo + }; + return (new Fingerprint(opts)); +}; + +Key.prototype.defaultHashAlgorithm = function () { + var hashAlgo = 'sha1'; + if (this.type === 'rsa') + hashAlgo = 'sha256'; + if (this.type === 'dsa' && this.size > 1024) + hashAlgo = 'sha256'; + if (this.type === 'ed25519') + hashAlgo = 'sha512'; + if (this.type === 'ecdsa') { + if (this.size <= 256) + hashAlgo = 'sha256'; + else if (this.size <= 384) + hashAlgo = 'sha384'; + else + hashAlgo = 'sha512'; + } + return (hashAlgo); +}; + +Key.prototype.createVerify = function (hashAlgo) { + if (hashAlgo === undefined) + hashAlgo = this.defaultHashAlgorithm(); + assert.string(hashAlgo, 'hash algorithm'); + + /* ED25519 is not supported by OpenSSL, use a javascript impl. 
*/ + if (this.type === 'ed25519' && edCompat !== undefined) + return (new edCompat.Verifier(this, hashAlgo)); + if (this.type === 'curve25519') + throw (new Error('Curve25519 keys are not suitable for ' + + 'signing or verification')); + + var v, nm, err; + try { + nm = hashAlgo.toUpperCase(); + v = crypto.createVerify(nm); + } catch (e) { + err = e; + } + if (v === undefined || (err instanceof Error && + err.message.match(/Unknown message digest/))) { + nm = 'RSA-'; + nm += hashAlgo.toUpperCase(); + v = crypto.createVerify(nm); + } + assert.ok(v, 'failed to create verifier'); + var oldVerify = v.verify.bind(v); + var key = this.toBuffer('pkcs8'); + var curve = this.curve; + var self = this; + v.verify = function (signature, fmt) { + if (Signature.isSignature(signature, [2, 0])) { + if (signature.type !== self.type) + return (false); + if (signature.hashAlgorithm && + signature.hashAlgorithm !== hashAlgo) + return (false); + if (signature.curve && self.type === 'ecdsa' && + signature.curve !== curve) + return (false); + return (oldVerify(key, signature.toBuffer('asn1'))); + + } else if (typeof (signature) === 'string' || + Buffer.isBuffer(signature)) { + return (oldVerify(key, signature, fmt)); + + /* + * Avoid doing this on valid arguments, walking the prototype + * chain can be quite slow. 
+ */ + } else if (Signature.isSignature(signature, [1, 0])) { + throw (new Error('signature was created by too old ' + + 'a version of sshpk and cannot be verified')); + + } else { + throw (new TypeError('signature must be a string, ' + + 'Buffer, or Signature object')); + } + }; + return (v); +}; + +Key.prototype.createDiffieHellman = function () { + if (this.type === 'rsa') + throw (new Error('RSA keys do not support Diffie-Hellman')); + + return (new DiffieHellman(this)); +}; +Key.prototype.createDH = Key.prototype.createDiffieHellman; + +Key.parse = function (data, format, options) { + if (typeof (data) !== 'string') + assert.buffer(data, 'data'); + if (format === undefined) + format = 'auto'; + assert.string(format, 'format'); + if (typeof (options) === 'string') + options = { filename: options }; + assert.optionalObject(options, 'options'); + if (options === undefined) + options = {}; + assert.optionalString(options.filename, 'options.filename'); + if (options.filename === undefined) + options.filename = '(unnamed)'; + + assert.object(formats[format], 'formats[format]'); + + try { + var k = formats[format].read(data, options); + if (k instanceof PrivateKey) + k = k.toPublic(); + if (!k.comment) + k.comment = options.filename; + return (k); + } catch (e) { + if (e.name === 'KeyEncryptedError') + throw (e); + throw (new KeyParseError(options.filename, format, e)); + } +}; + +Key.isKey = function (obj, ver) { + return (utils.isCompatible(obj, Key, ver)); +}; + +/* + * API versions for Key: + * [1,0] -- initial ver, may take Signature for createVerify or may not + * [1,1] -- added pkcs1, pkcs8 formats + * [1,2] -- added auto, ssh-private, openssh formats + * [1,3] -- added defaultHashAlgorithm + * [1,4] -- added ed support, createDH + * [1,5] -- first explicitly tagged version + * [1,6] -- changed ed25519 part names + */ +Key.prototype._sshpkApiVersion = [1, 6]; + +Key._oldVersionDetect = function (obj) { + assert.func(obj.toBuffer); + 
assert.func(obj.fingerprint); + if (obj.createDH) + return ([1, 4]); + if (obj.defaultHashAlgorithm) + return ([1, 3]); + if (obj.formats['auto']) + return ([1, 2]); + if (obj.formats['pkcs1']) + return ([1, 1]); + return ([1, 0]); +}; + + +/***/ }), +/* 28 */ +/***/ (function(module, exports) { + +module.exports = require("assert"); + +/***/ }), +/* 29 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.default = nullify; +function nullify(obj = {}) { + if (Array.isArray(obj)) { + for (var _iterator = obj, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? _iterator : _iterator[Symbol.iterator]();;) { + var _ref; + + if (_isArray) { + if (_i >= _iterator.length) break; + _ref = _iterator[_i++]; + } else { + _i = _iterator.next(); + if (_i.done) break; + _ref = _i.value; + } + + const item = _ref; + + nullify(item); + } + } else if (obj !== null && typeof obj === 'object' || typeof obj === 'function') { + Object.setPrototypeOf(obj, null); + + // for..in can only be applied to 'object', not 'function' + if (typeof obj === 'object') { + for (const key in obj) { + nullify(obj[key]); + } + } + } + + return obj; +} + +/***/ }), +/* 30 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + +const escapeStringRegexp = __webpack_require__(388); +const ansiStyles = __webpack_require__(506); +const stdoutColor = __webpack_require__(598).stdout; + +const template = __webpack_require__(599); + +const isSimpleWindowsTerm = process.platform === 'win32' && !(process.env.TERM || '').toLowerCase().startsWith('xterm'); + +// `supportsColor.level` → `ansiStyles.color[name]` mapping +const levelMapping = ['ansi', 'ansi', 'ansi256', 'ansi16m']; + +// `color-convert` models to exclude from the Chalk API due to conflicts and such +const skipModels = new Set(['gray']); + +const styles = Object.create(null); + +function applyOptions(obj, options) 
{ + options = options || {}; + + // Detect level if not set manually + const scLevel = stdoutColor ? stdoutColor.level : 0; + obj.level = options.level === undefined ? scLevel : options.level; + obj.enabled = 'enabled' in options ? options.enabled : obj.level > 0; +} + +function Chalk(options) { + // We check for this.template here since calling `chalk.constructor()` + // by itself will have a `this` of a previously constructed chalk object + if (!this || !(this instanceof Chalk) || this.template) { + const chalk = {}; + applyOptions(chalk, options); + + chalk.template = function () { + const args = [].slice.call(arguments); + return chalkTag.apply(null, [chalk.template].concat(args)); + }; + + Object.setPrototypeOf(chalk, Chalk.prototype); + Object.setPrototypeOf(chalk.template, chalk); + + chalk.template.constructor = Chalk; + + return chalk.template; + } + + applyOptions(this, options); +} + +// Use bright blue on Windows as the normal blue color is illegible +if (isSimpleWindowsTerm) { + ansiStyles.blue.open = '\u001B[94m'; +} + +for (const key of Object.keys(ansiStyles)) { + ansiStyles[key].closeRe = new RegExp(escapeStringRegexp(ansiStyles[key].close), 'g'); + + styles[key] = { + get() { + const codes = ansiStyles[key]; + return build.call(this, this._styles ? this._styles.concat(codes) : [codes], this._empty, key); + } + }; +} + +styles.visible = { + get() { + return build.call(this, this._styles || [], true, 'visible'); + } +}; + +ansiStyles.color.closeRe = new RegExp(escapeStringRegexp(ansiStyles.color.close), 'g'); +for (const model of Object.keys(ansiStyles.color.ansi)) { + if (skipModels.has(model)) { + continue; + } + + styles[model] = { + get() { + const level = this.level; + return function () { + const open = ansiStyles.color[levelMapping[level]][model].apply(null, arguments); + const codes = { + open, + close: ansiStyles.color.close, + closeRe: ansiStyles.color.closeRe + }; + return build.call(this, this._styles ? 
this._styles.concat(codes) : [codes], this._empty, model); + }; + } + }; +} + +ansiStyles.bgColor.closeRe = new RegExp(escapeStringRegexp(ansiStyles.bgColor.close), 'g'); +for (const model of Object.keys(ansiStyles.bgColor.ansi)) { + if (skipModels.has(model)) { + continue; + } + + const bgModel = 'bg' + model[0].toUpperCase() + model.slice(1); + styles[bgModel] = { + get() { + const level = this.level; + return function () { + const open = ansiStyles.bgColor[levelMapping[level]][model].apply(null, arguments); + const codes = { + open, + close: ansiStyles.bgColor.close, + closeRe: ansiStyles.bgColor.closeRe + }; + return build.call(this, this._styles ? this._styles.concat(codes) : [codes], this._empty, model); + }; + } + }; +} + +const proto = Object.defineProperties(() => {}, styles); + +function build(_styles, _empty, key) { + const builder = function () { + return applyStyle.apply(builder, arguments); + }; + + builder._styles = _styles; + builder._empty = _empty; + + const self = this; + + Object.defineProperty(builder, 'level', { + enumerable: true, + get() { + return self.level; + }, + set(level) { + self.level = level; + } + }); + + Object.defineProperty(builder, 'enabled', { + enumerable: true, + get() { + return self.enabled; + }, + set(enabled) { + self.enabled = enabled; + } + }); + + // See below for fix regarding invisible grey/dim combination on Windows + builder.hasGrey = this.hasGrey || key === 'gray' || key === 'grey'; + + // `__proto__` is used because we must return a function, but there is + // no way to create a function with a different prototype + builder.__proto__ = proto; // eslint-disable-line no-proto + + return builder; +} + +function applyStyle() { + // Support varags, but simply cast to string in case there's only one arg + const args = arguments; + const argsLen = args.length; + let str = String(arguments[0]); + + if (argsLen === 0) { + return ''; + } + + if (argsLen > 1) { + // Don't slice `arguments`, it prevents V8 optimizations + 
for (let a = 1; a < argsLen; a++) { + str += ' ' + args[a]; + } + } + + if (!this.enabled || this.level <= 0 || !str) { + return this._empty ? '' : str; + } + + // Turns out that on Windows dimmed gray text becomes invisible in cmd.exe, + // see https://github.com/chalk/chalk/issues/58 + // If we're on Windows and we're dealing with a gray color, temporarily make 'dim' a noop. + const originalDim = ansiStyles.dim.open; + if (isSimpleWindowsTerm && this.hasGrey) { + ansiStyles.dim.open = ''; + } + + for (const code of this._styles.slice().reverse()) { + // Replace any instances already present with a re-opening code + // otherwise only the part of the string until said closing code + // will be colored, and the rest will simply be 'plain'. + str = code.open + str.replace(code.closeRe, code.open) + code.close; + + // Close the styling before a linebreak and reopen + // after next line to fix a bleed issue on macOS + // https://github.com/chalk/chalk/pull/92 + str = str.replace(/\r?\n/g, `${code.close}$&${code.open}`); + } + + // Reset the original `dim` if we changed it to work around the Windows dimmed gray issue + ansiStyles.dim.open = originalDim; + + return str; +} + +function chalkTag(chalk, strings) { + if (!Array.isArray(strings)) { + // If chalk() was called by itself or with a string, + // return the string itself as a string. 
+ return [].slice.call(arguments, 1).join(' '); + } + + const args = [].slice.call(arguments, 2); + const parts = [strings.raw[0]]; + + for (let i = 1; i < strings.length; i++) { + parts.push(String(args[i - 1]).replace(/[{}\\]/g, '\\$&')); + parts.push(String(strings.raw[i])); + } + + return template(chalk, parts.join('')); +} + +Object.defineProperties(Chalk.prototype, styles); + +module.exports = Chalk(); // eslint-disable-line new-cap +module.exports.supportsColor = stdoutColor; +module.exports.default = module.exports; // For TypeScript + + +/***/ }), +/* 31 */ +/***/ (function(module, exports) { + +var core = module.exports = { version: '2.5.7' }; +if (typeof __e == 'number') __e = core; // eslint-disable-line no-undef + + +/***/ }), +/* 32 */ +/***/ (function(module, exports, __webpack_require__) { + +// Copyright 2015 Joyent, Inc. + +var Buffer = __webpack_require__(15).Buffer; + +var algInfo = { + 'dsa': { + parts: ['p', 'q', 'g', 'y'], + sizePart: 'p' + }, + 'rsa': { + parts: ['e', 'n'], + sizePart: 'n' + }, + 'ecdsa': { + parts: ['curve', 'Q'], + sizePart: 'Q' + }, + 'ed25519': { + parts: ['A'], + sizePart: 'A' + } +}; +algInfo['curve25519'] = algInfo['ed25519']; + +var algPrivInfo = { + 'dsa': { + parts: ['p', 'q', 'g', 'y', 'x'] + }, + 'rsa': { + parts: ['n', 'e', 'd', 'iqmp', 'p', 'q'] + }, + 'ecdsa': { + parts: ['curve', 'Q', 'd'] + }, + 'ed25519': { + parts: ['A', 'k'] + } +}; +algPrivInfo['curve25519'] = algPrivInfo['ed25519']; + +var hashAlgs = { + 'md5': true, + 'sha1': true, + 'sha256': true, + 'sha384': true, + 'sha512': true +}; + +/* + * Taken from + * http://csrc.nist.gov/groups/ST/toolkit/documents/dss/NISTReCur.pdf + */ +var curves = { + 'nistp256': { + size: 256, + pkcs8oid: '1.2.840.10045.3.1.7', + p: Buffer.from(('00' + + 'ffffffff 00000001 00000000 00000000' + + '00000000 ffffffff ffffffff ffffffff'). 
+ replace(/ /g, ''), 'hex'), + a: Buffer.from(('00' + + 'FFFFFFFF 00000001 00000000 00000000' + + '00000000 FFFFFFFF FFFFFFFF FFFFFFFC'). + replace(/ /g, ''), 'hex'), + b: Buffer.from(( + '5ac635d8 aa3a93e7 b3ebbd55 769886bc' + + '651d06b0 cc53b0f6 3bce3c3e 27d2604b'). + replace(/ /g, ''), 'hex'), + s: Buffer.from(('00' + + 'c49d3608 86e70493 6a6678e1 139d26b7' + + '819f7e90'). + replace(/ /g, ''), 'hex'), + n: Buffer.from(('00' + + 'ffffffff 00000000 ffffffff ffffffff' + + 'bce6faad a7179e84 f3b9cac2 fc632551'). + replace(/ /g, ''), 'hex'), + G: Buffer.from(('04' + + '6b17d1f2 e12c4247 f8bce6e5 63a440f2' + + '77037d81 2deb33a0 f4a13945 d898c296' + + '4fe342e2 fe1a7f9b 8ee7eb4a 7c0f9e16' + + '2bce3357 6b315ece cbb64068 37bf51f5'). + replace(/ /g, ''), 'hex') + }, + 'nistp384': { + size: 384, + pkcs8oid: '1.3.132.0.34', + p: Buffer.from(('00' + + 'ffffffff ffffffff ffffffff ffffffff' + + 'ffffffff ffffffff ffffffff fffffffe' + + 'ffffffff 00000000 00000000 ffffffff'). + replace(/ /g, ''), 'hex'), + a: Buffer.from(('00' + + 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + + 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE' + + 'FFFFFFFF 00000000 00000000 FFFFFFFC'). + replace(/ /g, ''), 'hex'), + b: Buffer.from(( + 'b3312fa7 e23ee7e4 988e056b e3f82d19' + + '181d9c6e fe814112 0314088f 5013875a' + + 'c656398d 8a2ed19d 2a85c8ed d3ec2aef'). + replace(/ /g, ''), 'hex'), + s: Buffer.from(('00' + + 'a335926a a319a27a 1d00896a 6773a482' + + '7acdac73'). + replace(/ /g, ''), 'hex'), + n: Buffer.from(('00' + + 'ffffffff ffffffff ffffffff ffffffff' + + 'ffffffff ffffffff c7634d81 f4372ddf' + + '581a0db2 48b0a77a ecec196a ccc52973'). + replace(/ /g, ''), 'hex'), + G: Buffer.from(('04' + + 'aa87ca22 be8b0537 8eb1c71e f320ad74' + + '6e1d3b62 8ba79b98 59f741e0 82542a38' + + '5502f25d bf55296c 3a545e38 72760ab7' + + '3617de4a 96262c6f 5d9e98bf 9292dc29' + + 'f8f41dbd 289a147c e9da3113 b5f0b8c0' + + '0a60b1ce 1d7e819d 7a431d7c 90ea0e5f'). 
+ replace(/ /g, ''), 'hex') + }, + 'nistp521': { + size: 521, + pkcs8oid: '1.3.132.0.35', + p: Buffer.from(( + '01ffffff ffffffff ffffffff ffffffff' + + 'ffffffff ffffffff ffffffff ffffffff' + + 'ffffffff ffffffff ffffffff ffffffff' + + 'ffffffff ffffffff ffffffff ffffffff' + + 'ffff').replace(/ /g, ''), 'hex'), + a: Buffer.from(('01FF' + + 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + + 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + + 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + + 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFC'). + replace(/ /g, ''), 'hex'), + b: Buffer.from(('51' + + '953eb961 8e1c9a1f 929a21a0 b68540ee' + + 'a2da725b 99b315f3 b8b48991 8ef109e1' + + '56193951 ec7e937b 1652c0bd 3bb1bf07' + + '3573df88 3d2c34f1 ef451fd4 6b503f00'). + replace(/ /g, ''), 'hex'), + s: Buffer.from(('00' + + 'd09e8800 291cb853 96cc6717 393284aa' + + 'a0da64ba').replace(/ /g, ''), 'hex'), + n: Buffer.from(('01ff' + + 'ffffffff ffffffff ffffffff ffffffff' + + 'ffffffff ffffffff ffffffff fffffffa' + + '51868783 bf2f966b 7fcc0148 f709a5d0' + + '3bb5c9b8 899c47ae bb6fb71e 91386409'). + replace(/ /g, ''), 'hex'), + G: Buffer.from(('04' + + '00c6 858e06b7 0404e9cd 9e3ecb66 2395b442' + + '9c648139 053fb521 f828af60 6b4d3dba' + + 'a14b5e77 efe75928 fe1dc127 a2ffa8de' + + '3348b3c1 856a429b f97e7e31 c2e5bd66' + + '0118 39296a78 9a3bc004 5c8a5fb4 2c7d1bd9' + + '98f54449 579b4468 17afbd17 273e662c' + + '97ee7299 5ef42640 c550b901 3fad0761' + + '353c7086 a272c240 88be9476 9fd16650'). + replace(/ /g, ''), 'hex') + } +}; + +module.exports = { + info: algInfo, + privInfo: algPrivInfo, + hashAlgs: hashAlgs, + curves: curves +}; + + +/***/ }), +/* 33 */ +/***/ (function(module, exports, __webpack_require__) { + +// Copyright 2017 Joyent, Inc. 
+ +module.exports = PrivateKey; + +var assert = __webpack_require__(16); +var Buffer = __webpack_require__(15).Buffer; +var algs = __webpack_require__(32); +var crypto = __webpack_require__(11); +var Fingerprint = __webpack_require__(156); +var Signature = __webpack_require__(75); +var errs = __webpack_require__(74); +var util = __webpack_require__(3); +var utils = __webpack_require__(26); +var dhe = __webpack_require__(325); +var generateECDSA = dhe.generateECDSA; +var generateED25519 = dhe.generateED25519; +var edCompat; +var nacl; + +try { + edCompat = __webpack_require__(454); +} catch (e) { + /* Just continue through, and bail out if we try to use it. */ +} + +var Key = __webpack_require__(27); + +var InvalidAlgorithmError = errs.InvalidAlgorithmError; +var KeyParseError = errs.KeyParseError; +var KeyEncryptedError = errs.KeyEncryptedError; + +var formats = {}; +formats['auto'] = __webpack_require__(455); +formats['pem'] = __webpack_require__(86); +formats['pkcs1'] = __webpack_require__(327); +formats['pkcs8'] = __webpack_require__(157); +formats['rfc4253'] = __webpack_require__(103); +formats['ssh-private'] = __webpack_require__(192); +formats['openssh'] = formats['ssh-private']; +formats['ssh'] = formats['ssh-private']; +formats['dnssec'] = __webpack_require__(326); + +function PrivateKey(opts) { + assert.object(opts, 'options'); + Key.call(this, opts); + + this._pubCache = undefined; +} +util.inherits(PrivateKey, Key); + +PrivateKey.formats = formats; + +PrivateKey.prototype.toBuffer = function (format, options) { + if (format === undefined) + format = 'pkcs1'; + assert.string(format, 'format'); + assert.object(formats[format], 'formats[format]'); + assert.optionalObject(options, 'options'); + + return (formats[format].write(this, options)); +}; + +PrivateKey.prototype.hash = function (algo) { + return (this.toPublic().hash(algo)); +}; + +PrivateKey.prototype.toPublic = function () { + if (this._pubCache) + return (this._pubCache); + + var algInfo = 
algs.info[this.type]; + var pubParts = []; + for (var i = 0; i < algInfo.parts.length; ++i) { + var p = algInfo.parts[i]; + pubParts.push(this.part[p]); + } + + this._pubCache = new Key({ + type: this.type, + source: this, + parts: pubParts + }); + if (this.comment) + this._pubCache.comment = this.comment; + return (this._pubCache); +}; + +PrivateKey.prototype.derive = function (newType) { + assert.string(newType, 'type'); + var priv, pub, pair; + + if (this.type === 'ed25519' && newType === 'curve25519') { + if (nacl === undefined) + nacl = __webpack_require__(76); + + priv = this.part.k.data; + if (priv[0] === 0x00) + priv = priv.slice(1); + + pair = nacl.box.keyPair.fromSecretKey(new Uint8Array(priv)); + pub = Buffer.from(pair.publicKey); + + return (new PrivateKey({ + type: 'curve25519', + parts: [ + { name: 'A', data: utils.mpNormalize(pub) }, + { name: 'k', data: utils.mpNormalize(priv) } + ] + })); + } else if (this.type === 'curve25519' && newType === 'ed25519') { + if (nacl === undefined) + nacl = __webpack_require__(76); + + priv = this.part.k.data; + if (priv[0] === 0x00) + priv = priv.slice(1); + + pair = nacl.sign.keyPair.fromSeed(new Uint8Array(priv)); + pub = Buffer.from(pair.publicKey); + + return (new PrivateKey({ + type: 'ed25519', + parts: [ + { name: 'A', data: utils.mpNormalize(pub) }, + { name: 'k', data: utils.mpNormalize(priv) } + ] + })); + } + throw (new Error('Key derivation not supported from ' + this.type + + ' to ' + newType)); +}; + +PrivateKey.prototype.createVerify = function (hashAlgo) { + return (this.toPublic().createVerify(hashAlgo)); +}; + +PrivateKey.prototype.createSign = function (hashAlgo) { + if (hashAlgo === undefined) + hashAlgo = this.defaultHashAlgorithm(); + assert.string(hashAlgo, 'hash algorithm'); + + /* ED25519 is not supported by OpenSSL, use a javascript impl. 
*/ + if (this.type === 'ed25519' && edCompat !== undefined) + return (new edCompat.Signer(this, hashAlgo)); + if (this.type === 'curve25519') + throw (new Error('Curve25519 keys are not suitable for ' + + 'signing or verification')); + + var v, nm, err; + try { + nm = hashAlgo.toUpperCase(); + v = crypto.createSign(nm); + } catch (e) { + err = e; + } + if (v === undefined || (err instanceof Error && + err.message.match(/Unknown message digest/))) { + nm = 'RSA-'; + nm += hashAlgo.toUpperCase(); + v = crypto.createSign(nm); + } + assert.ok(v, 'failed to create verifier'); + var oldSign = v.sign.bind(v); + var key = this.toBuffer('pkcs1'); + var type = this.type; + var curve = this.curve; + v.sign = function () { + var sig = oldSign(key); + if (typeof (sig) === 'string') + sig = Buffer.from(sig, 'binary'); + sig = Signature.parse(sig, type, 'asn1'); + sig.hashAlgorithm = hashAlgo; + sig.curve = curve; + return (sig); + }; + return (v); +}; + +PrivateKey.parse = function (data, format, options) { + if (typeof (data) !== 'string') + assert.buffer(data, 'data'); + if (format === undefined) + format = 'auto'; + assert.string(format, 'format'); + if (typeof (options) === 'string') + options = { filename: options }; + assert.optionalObject(options, 'options'); + if (options === undefined) + options = {}; + assert.optionalString(options.filename, 'options.filename'); + if (options.filename === undefined) + options.filename = '(unnamed)'; + + assert.object(formats[format], 'formats[format]'); + + try { + var k = formats[format].read(data, options); + assert.ok(k instanceof PrivateKey, 'key is not a private key'); + if (!k.comment) + k.comment = options.filename; + return (k); + } catch (e) { + if (e.name === 'KeyEncryptedError') + throw (e); + throw (new KeyParseError(options.filename, format, e)); + } +}; + +PrivateKey.isPrivateKey = function (obj, ver) { + return (utils.isCompatible(obj, PrivateKey, ver)); +}; + +PrivateKey.generate = function (type, options) { + if 
(options === undefined) + options = {}; + assert.object(options, 'options'); + + switch (type) { + case 'ecdsa': + if (options.curve === undefined) + options.curve = 'nistp256'; + assert.string(options.curve, 'options.curve'); + return (generateECDSA(options.curve)); + case 'ed25519': + return (generateED25519()); + default: + throw (new Error('Key generation not supported with key ' + + 'type "' + type + '"')); + } +}; + +/* + * API versions for PrivateKey: + * [1,0] -- initial ver + * [1,1] -- added auto, pkcs[18], openssh/ssh-private formats + * [1,2] -- added defaultHashAlgorithm + * [1,3] -- added derive, ed, createDH + * [1,4] -- first tagged version + * [1,5] -- changed ed25519 part names and format + */ +PrivateKey.prototype._sshpkApiVersion = [1, 5]; + +PrivateKey._oldVersionDetect = function (obj) { + assert.func(obj.toPublic); + assert.func(obj.createSign); + if (obj.derive) + return ([1, 3]); + if (obj.defaultHashAlgorithm) + return ([1, 2]); + if (obj.formats['auto']) + return ([1, 1]); + return ([1, 0]); +}; + + +/***/ }), +/* 34 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.wrapLifecycle = exports.run = exports.install = exports.Install = undefined; + +var _extends2; + +function _load_extends() { + return _extends2 = _interopRequireDefault(__webpack_require__(21)); +} + +var _asyncToGenerator2; + +function _load_asyncToGenerator() { + return _asyncToGenerator2 = _interopRequireDefault(__webpack_require__(2)); +} + +let install = exports.install = (() => { + var _ref29 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (config, reporter, flags, lockfile) { + yield wrapLifecycle(config, flags, (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + const install = new Install(flags, config, reporter, lockfile); + yield install.init(); + })); + }); + + return function install(_x7, _x8, _x9, _x10) { 
+ return _ref29.apply(this, arguments); + }; +})(); + +let run = exports.run = (() => { + var _ref31 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (config, reporter, flags, args) { + let lockfile; + let error = 'installCommandRenamed'; + if (flags.lockfile === false) { + lockfile = new (_lockfile || _load_lockfile()).default(); + } else { + lockfile = yield (_lockfile || _load_lockfile()).default.fromDirectory(config.lockfileFolder, reporter); + } + + if (args.length) { + const exampleArgs = args.slice(); + + if (flags.saveDev) { + exampleArgs.push('--dev'); + } + if (flags.savePeer) { + exampleArgs.push('--peer'); + } + if (flags.saveOptional) { + exampleArgs.push('--optional'); + } + if (flags.saveExact) { + exampleArgs.push('--exact'); + } + if (flags.saveTilde) { + exampleArgs.push('--tilde'); + } + let command = 'add'; + if (flags.global) { + error = 'globalFlagRemoved'; + command = 'global add'; + } + throw new (_errors || _load_errors()).MessageError(reporter.lang(error, `yarn ${command} ${exampleArgs.join(' ')}`)); + } + + yield install(config, reporter, flags, lockfile); + }); + + return function run(_x11, _x12, _x13, _x14) { + return _ref31.apply(this, arguments); + }; +})(); + +let wrapLifecycle = exports.wrapLifecycle = (() => { + var _ref32 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (config, flags, factory) { + yield config.executeLifecycleScript('preinstall'); + + yield factory(); + + // npm behaviour, seems kinda funky but yay compatibility + yield config.executeLifecycleScript('install'); + yield config.executeLifecycleScript('postinstall'); + + if (!config.production) { + if (!config.disablePrepublish) { + yield config.executeLifecycleScript('prepublish'); + } + yield config.executeLifecycleScript('prepare'); + } + }); + + return function wrapLifecycle(_x15, _x16, _x17) { + return _ref32.apply(this, arguments); + }; +})(); + +exports.hasWrapper = hasWrapper; +exports.setFlags = setFlags; + 
+var _objectPath; + +function _load_objectPath() { + return _objectPath = _interopRequireDefault(__webpack_require__(304)); +} + +var _hooks; + +function _load_hooks() { + return _hooks = __webpack_require__(374); +} + +var _index; + +function _load_index() { + return _index = _interopRequireDefault(__webpack_require__(220)); +} + +var _errors; + +function _load_errors() { + return _errors = __webpack_require__(6); +} + +var _integrityChecker; + +function _load_integrityChecker() { + return _integrityChecker = _interopRequireDefault(__webpack_require__(208)); +} + +var _lockfile; + +function _load_lockfile() { + return _lockfile = _interopRequireDefault(__webpack_require__(19)); +} + +var _lockfile2; + +function _load_lockfile2() { + return _lockfile2 = __webpack_require__(19); +} + +var _packageFetcher; + +function _load_packageFetcher() { + return _packageFetcher = _interopRequireWildcard(__webpack_require__(210)); +} + +var _packageInstallScripts; + +function _load_packageInstallScripts() { + return _packageInstallScripts = _interopRequireDefault(__webpack_require__(557)); +} + +var _packageCompatibility; + +function _load_packageCompatibility() { + return _packageCompatibility = _interopRequireWildcard(__webpack_require__(209)); +} + +var _packageResolver; + +function _load_packageResolver() { + return _packageResolver = _interopRequireDefault(__webpack_require__(366)); +} + +var _packageLinker; + +function _load_packageLinker() { + return _packageLinker = _interopRequireDefault(__webpack_require__(211)); +} + +var _index2; + +function _load_index2() { + return _index2 = __webpack_require__(57); +} + +var _index3; + +function _load_index3() { + return _index3 = __webpack_require__(78); +} + +var _autoclean; + +function _load_autoclean() { + return _autoclean = __webpack_require__(354); +} + +var _constants; + +function _load_constants() { + return _constants = _interopRequireWildcard(__webpack_require__(8)); +} + +var _normalizePattern; + +function 
_load_normalizePattern() { + return _normalizePattern = __webpack_require__(37); +} + +var _fs; + +function _load_fs() { + return _fs = _interopRequireWildcard(__webpack_require__(4)); +} + +var _map; + +function _load_map() { + return _map = _interopRequireDefault(__webpack_require__(29)); +} + +var _yarnVersion; + +function _load_yarnVersion() { + return _yarnVersion = __webpack_require__(120); +} + +var _generatePnpMap; + +function _load_generatePnpMap() { + return _generatePnpMap = __webpack_require__(579); +} + +var _workspaceLayout; + +function _load_workspaceLayout() { + return _workspaceLayout = _interopRequireDefault(__webpack_require__(90)); +} + +var _resolutionMap; + +function _load_resolutionMap() { + return _resolutionMap = _interopRequireDefault(__webpack_require__(214)); +} + +var _guessName; + +function _load_guessName() { + return _guessName = _interopRequireDefault(__webpack_require__(169)); +} + +var _audit; + +function _load_audit() { + return _audit = _interopRequireDefault(__webpack_require__(353)); +} + +function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } + +function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } + +const deepEqual = __webpack_require__(631); + +const emoji = __webpack_require__(302); +const invariant = __webpack_require__(9); +const path = __webpack_require__(0); +const semver = __webpack_require__(22); +const uuid = __webpack_require__(119); +const ssri = __webpack_require__(65); + +const ONE_DAY = 1000 * 60 * 60 * 24; + +/** + * Try and detect the installation method for Yarn and provide a command to update it with. 
+ */ + +function getUpdateCommand(installationMethod) { + if (installationMethod === 'tar') { + return `curl --compressed -o- -L ${(_constants || _load_constants()).YARN_INSTALLER_SH} | bash`; + } + + if (installationMethod === 'homebrew') { + return 'brew upgrade yarn'; + } + + if (installationMethod === 'deb') { + return 'sudo apt-get update && sudo apt-get install yarn'; + } + + if (installationMethod === 'rpm') { + return 'sudo yum install yarn'; + } + + if (installationMethod === 'npm') { + return 'npm install --global yarn'; + } + + if (installationMethod === 'chocolatey') { + return 'choco upgrade yarn'; + } + + if (installationMethod === 'apk') { + return 'apk update && apk add -u yarn'; + } + + if (installationMethod === 'portage') { + return 'sudo emerge --sync && sudo emerge -au sys-apps/yarn'; + } + + return null; +} + +function getUpdateInstaller(installationMethod) { + // Windows + if (installationMethod === 'msi') { + return (_constants || _load_constants()).YARN_INSTALLER_MSI; + } + + return null; +} + +function normalizeFlags(config, rawFlags) { + const flags = { + // install + har: !!rawFlags.har, + ignorePlatform: !!rawFlags.ignorePlatform, + ignoreEngines: !!rawFlags.ignoreEngines, + ignoreScripts: !!rawFlags.ignoreScripts, + ignoreOptional: !!rawFlags.ignoreOptional, + force: !!rawFlags.force, + flat: !!rawFlags.flat, + lockfile: rawFlags.lockfile !== false, + pureLockfile: !!rawFlags.pureLockfile, + updateChecksums: !!rawFlags.updateChecksums, + skipIntegrityCheck: !!rawFlags.skipIntegrityCheck, + frozenLockfile: !!rawFlags.frozenLockfile, + linkDuplicates: !!rawFlags.linkDuplicates, + checkFiles: !!rawFlags.checkFiles, + audit: !!rawFlags.audit, + + // add + peer: !!rawFlags.peer, + dev: !!rawFlags.dev, + optional: !!rawFlags.optional, + exact: !!rawFlags.exact, + tilde: !!rawFlags.tilde, + ignoreWorkspaceRootCheck: !!rawFlags.ignoreWorkspaceRootCheck, + + // outdated, update-interactive + includeWorkspaceDeps: 
!!rawFlags.includeWorkspaceDeps, + + // add, remove, update + workspaceRootIsCwd: rawFlags.workspaceRootIsCwd !== false + }; + + if (config.getOption('ignore-scripts')) { + flags.ignoreScripts = true; + } + + if (config.getOption('ignore-platform')) { + flags.ignorePlatform = true; + } + + if (config.getOption('ignore-engines')) { + flags.ignoreEngines = true; + } + + if (config.getOption('ignore-optional')) { + flags.ignoreOptional = true; + } + + if (config.getOption('force')) { + flags.force = true; + } + + return flags; +} + +class Install { + constructor(flags, config, reporter, lockfile) { + this.rootManifestRegistries = []; + this.rootPatternsToOrigin = (0, (_map || _load_map()).default)(); + this.lockfile = lockfile; + this.reporter = reporter; + this.config = config; + this.flags = normalizeFlags(config, flags); + this.resolutions = (0, (_map || _load_map()).default)(); // Legacy resolutions field used for flat install mode + this.resolutionMap = new (_resolutionMap || _load_resolutionMap()).default(config); // Selective resolutions for nested dependencies + this.resolver = new (_packageResolver || _load_packageResolver()).default(config, lockfile, this.resolutionMap); + this.integrityChecker = new (_integrityChecker || _load_integrityChecker()).default(config); + this.linker = new (_packageLinker || _load_packageLinker()).default(config, this.resolver); + this.scripts = new (_packageInstallScripts || _load_packageInstallScripts()).default(config, this.resolver, this.flags.force); + } + + /** + * Create a list of dependency requests from the current directories manifests. 
+ */ + + fetchRequestFromCwd(excludePatterns = [], ignoreUnusedPatterns = false) { + var _this = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + const patterns = []; + const deps = []; + let resolutionDeps = []; + const manifest = {}; + + const ignorePatterns = []; + const usedPatterns = []; + let workspaceLayout; + + // some commands should always run in the context of the entire workspace + const cwd = _this.flags.includeWorkspaceDeps || _this.flags.workspaceRootIsCwd ? _this.config.lockfileFolder : _this.config.cwd; + + // non-workspaces are always root, otherwise check for workspace root + const cwdIsRoot = !_this.config.workspaceRootFolder || _this.config.lockfileFolder === cwd; + + // exclude package names that are in install args + const excludeNames = []; + for (var _iterator = excludePatterns, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? _iterator : _iterator[Symbol.iterator]();;) { + var _ref; + + if (_isArray) { + if (_i >= _iterator.length) break; + _ref = _iterator[_i++]; + } else { + _i = _iterator.next(); + if (_i.done) break; + _ref = _i.value; + } + + const pattern = _ref; + + if ((0, (_index3 || _load_index3()).getExoticResolver)(pattern)) { + excludeNames.push((0, (_guessName || _load_guessName()).default)(pattern)); + } else { + // extract the name + const parts = (0, (_normalizePattern || _load_normalizePattern()).normalizePattern)(pattern); + excludeNames.push(parts.name); + } + } + + const stripExcluded = function stripExcluded(manifest) { + for (var _iterator2 = excludeNames, _isArray2 = Array.isArray(_iterator2), _i2 = 0, _iterator2 = _isArray2 ? 
_iterator2 : _iterator2[Symbol.iterator]();;) { + var _ref2; + + if (_isArray2) { + if (_i2 >= _iterator2.length) break; + _ref2 = _iterator2[_i2++]; + } else { + _i2 = _iterator2.next(); + if (_i2.done) break; + _ref2 = _i2.value; + } + + const exclude = _ref2; + + if (manifest.dependencies && manifest.dependencies[exclude]) { + delete manifest.dependencies[exclude]; + } + if (manifest.devDependencies && manifest.devDependencies[exclude]) { + delete manifest.devDependencies[exclude]; + } + if (manifest.optionalDependencies && manifest.optionalDependencies[exclude]) { + delete manifest.optionalDependencies[exclude]; + } + } + }; + + for (var _iterator3 = Object.keys((_index2 || _load_index2()).registries), _isArray3 = Array.isArray(_iterator3), _i3 = 0, _iterator3 = _isArray3 ? _iterator3 : _iterator3[Symbol.iterator]();;) { + var _ref3; + + if (_isArray3) { + if (_i3 >= _iterator3.length) break; + _ref3 = _iterator3[_i3++]; + } else { + _i3 = _iterator3.next(); + if (_i3.done) break; + _ref3 = _i3.value; + } + + const registry = _ref3; + + const filename = (_index2 || _load_index2()).registries[registry].filename; + + const loc = path.join(cwd, filename); + if (!(yield (_fs || _load_fs()).exists(loc))) { + continue; + } + + _this.rootManifestRegistries.push(registry); + + const projectManifestJson = yield _this.config.readJson(loc); + yield (0, (_index || _load_index()).default)(projectManifestJson, cwd, _this.config, cwdIsRoot); + + Object.assign(_this.resolutions, projectManifestJson.resolutions); + Object.assign(manifest, projectManifestJson); + + _this.resolutionMap.init(_this.resolutions); + for (var _iterator4 = Object.keys(_this.resolutionMap.resolutionsByPackage), _isArray4 = Array.isArray(_iterator4), _i4 = 0, _iterator4 = _isArray4 ? 
_iterator4 : _iterator4[Symbol.iterator]();;) { + var _ref4; + + if (_isArray4) { + if (_i4 >= _iterator4.length) break; + _ref4 = _iterator4[_i4++]; + } else { + _i4 = _iterator4.next(); + if (_i4.done) break; + _ref4 = _i4.value; + } + + const packageName = _ref4; + + const optional = (_objectPath || _load_objectPath()).default.has(manifest.optionalDependencies, packageName) && _this.flags.ignoreOptional; + for (var _iterator8 = _this.resolutionMap.resolutionsByPackage[packageName], _isArray8 = Array.isArray(_iterator8), _i8 = 0, _iterator8 = _isArray8 ? _iterator8 : _iterator8[Symbol.iterator]();;) { + var _ref9; + + if (_isArray8) { + if (_i8 >= _iterator8.length) break; + _ref9 = _iterator8[_i8++]; + } else { + _i8 = _iterator8.next(); + if (_i8.done) break; + _ref9 = _i8.value; + } + + const _ref8 = _ref9; + const pattern = _ref8.pattern; + + resolutionDeps = [...resolutionDeps, { registry, pattern, optional, hint: 'resolution' }]; + } + } + + const pushDeps = function pushDeps(depType, manifest, { hint, optional }, isUsed) { + if (ignoreUnusedPatterns && !isUsed) { + return; + } + // We only take unused dependencies into consideration to get deterministic hoisting. + // Since flat mode doesn't care about hoisting and everything is top level and specified then we can safely + // leave these out. 
+ if (_this.flags.flat && !isUsed) { + return; + } + const depMap = manifest[depType]; + for (const name in depMap) { + if (excludeNames.indexOf(name) >= 0) { + continue; + } + + let pattern = name; + if (!_this.lockfile.getLocked(pattern)) { + // when we use --save we save the dependency to the lockfile with just the name rather than the + // version combo + pattern += '@' + depMap[name]; + } + + // normalization made sure packages are mentioned only once + if (isUsed) { + usedPatterns.push(pattern); + } else { + ignorePatterns.push(pattern); + } + + _this.rootPatternsToOrigin[pattern] = depType; + patterns.push(pattern); + deps.push({ pattern, registry, hint, optional, workspaceName: manifest.name, workspaceLoc: manifest._loc }); + } + }; + + if (cwdIsRoot) { + pushDeps('dependencies', projectManifestJson, { hint: null, optional: false }, true); + pushDeps('devDependencies', projectManifestJson, { hint: 'dev', optional: false }, !_this.config.production); + pushDeps('optionalDependencies', projectManifestJson, { hint: 'optional', optional: true }, true); + } + + if (_this.config.workspaceRootFolder) { + const workspaceLoc = cwdIsRoot ? 
loc : path.join(_this.config.lockfileFolder, filename); + const workspacesRoot = path.dirname(workspaceLoc); + + let workspaceManifestJson = projectManifestJson; + if (!cwdIsRoot) { + // the manifest we read before was a child workspace, so get the root + workspaceManifestJson = yield _this.config.readJson(workspaceLoc); + yield (0, (_index || _load_index()).default)(workspaceManifestJson, workspacesRoot, _this.config, true); + } + + const workspaces = yield _this.config.resolveWorkspaces(workspacesRoot, workspaceManifestJson); + workspaceLayout = new (_workspaceLayout || _load_workspaceLayout()).default(workspaces, _this.config); + + // add virtual manifest that depends on all workspaces, this way package hoisters and resolvers will work fine + const workspaceDependencies = (0, (_extends2 || _load_extends()).default)({}, workspaceManifestJson.dependencies); + for (var _iterator5 = Object.keys(workspaces), _isArray5 = Array.isArray(_iterator5), _i5 = 0, _iterator5 = _isArray5 ? _iterator5 : _iterator5[Symbol.iterator]();;) { + var _ref5; + + if (_isArray5) { + if (_i5 >= _iterator5.length) break; + _ref5 = _iterator5[_i5++]; + } else { + _i5 = _iterator5.next(); + if (_i5.done) break; + _ref5 = _i5.value; + } + + const workspaceName = _ref5; + + const workspaceManifest = workspaces[workspaceName].manifest; + workspaceDependencies[workspaceName] = workspaceManifest.version; + + // include dependencies from all workspaces + if (_this.flags.includeWorkspaceDeps) { + pushDeps('dependencies', workspaceManifest, { hint: null, optional: false }, true); + pushDeps('devDependencies', workspaceManifest, { hint: 'dev', optional: false }, !_this.config.production); + pushDeps('optionalDependencies', workspaceManifest, { hint: 'optional', optional: true }, true); + } + } + const virtualDependencyManifest = { + _uid: '', + name: `workspace-aggregator-${uuid.v4()}`, + version: '1.0.0', + _registry: 'npm', + _loc: workspacesRoot, + dependencies: workspaceDependencies, + 
devDependencies: (0, (_extends2 || _load_extends()).default)({}, workspaceManifestJson.devDependencies), + optionalDependencies: (0, (_extends2 || _load_extends()).default)({}, workspaceManifestJson.optionalDependencies), + private: workspaceManifestJson.private, + workspaces: workspaceManifestJson.workspaces + }; + workspaceLayout.virtualManifestName = virtualDependencyManifest.name; + const virtualDep = {}; + virtualDep[virtualDependencyManifest.name] = virtualDependencyManifest.version; + workspaces[virtualDependencyManifest.name] = { loc: workspacesRoot, manifest: virtualDependencyManifest }; + + // ensure dependencies that should be excluded are stripped from the correct manifest + stripExcluded(cwdIsRoot ? virtualDependencyManifest : workspaces[projectManifestJson.name].manifest); + + pushDeps('workspaces', { workspaces: virtualDep }, { hint: 'workspaces', optional: false }, true); + + const implicitWorkspaceDependencies = (0, (_extends2 || _load_extends()).default)({}, workspaceDependencies); + + for (var _iterator6 = (_constants || _load_constants()).OWNED_DEPENDENCY_TYPES, _isArray6 = Array.isArray(_iterator6), _i6 = 0, _iterator6 = _isArray6 ? _iterator6 : _iterator6[Symbol.iterator]();;) { + var _ref6; + + if (_isArray6) { + if (_i6 >= _iterator6.length) break; + _ref6 = _iterator6[_i6++]; + } else { + _i6 = _iterator6.next(); + if (_i6.done) break; + _ref6 = _i6.value; + } + + const type = _ref6; + + for (var _iterator7 = Object.keys(projectManifestJson[type] || {}), _isArray7 = Array.isArray(_iterator7), _i7 = 0, _iterator7 = _isArray7 ? 
_iterator7 : _iterator7[Symbol.iterator]();;) { + var _ref7; + + if (_isArray7) { + if (_i7 >= _iterator7.length) break; + _ref7 = _iterator7[_i7++]; + } else { + _i7 = _iterator7.next(); + if (_i7.done) break; + _ref7 = _i7.value; + } + + const dependencyName = _ref7; + + delete implicitWorkspaceDependencies[dependencyName]; + } + } + + pushDeps('dependencies', { dependencies: implicitWorkspaceDependencies }, { hint: 'workspaces', optional: false }, true); + } + + break; + } + + // inherit root flat flag + if (manifest.flat) { + _this.flags.flat = true; + } + + return { + requests: [...resolutionDeps, ...deps], + patterns, + manifest, + usedPatterns, + ignorePatterns, + workspaceLayout + }; + })(); + } + + /** + * TODO description + */ + + prepareRequests(requests) { + return requests; + } + + preparePatterns(patterns) { + return patterns; + } + preparePatternsForLinking(patterns, cwdManifest, cwdIsRoot) { + return patterns; + } + + prepareManifests() { + var _this2 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + const manifests = yield _this2.config.getRootManifests(); + return manifests; + })(); + } + + bailout(patterns, workspaceLayout) { + var _this3 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + // We don't want to skip the audit - it could yield important errors + if (_this3.flags.audit) { + return false; + } + // PNP is so fast that the integrity check isn't pertinent + if (_this3.config.plugnplayEnabled) { + return false; + } + if (_this3.flags.skipIntegrityCheck || _this3.flags.force) { + return false; + } + const lockfileCache = _this3.lockfile.cache; + if (!lockfileCache) { + return false; + } + const lockfileClean = _this3.lockfile.parseResultType === 'success'; + const match = yield _this3.integrityChecker.check(patterns, lockfileCache, _this3.flags, workspaceLayout); + if (_this3.flags.frozenLockfile && (!lockfileClean || match.missingPatterns.length 
> 0)) { + throw new (_errors || _load_errors()).MessageError(_this3.reporter.lang('frozenLockfileError')); + } + + const haveLockfile = yield (_fs || _load_fs()).exists(path.join(_this3.config.lockfileFolder, (_constants || _load_constants()).LOCKFILE_FILENAME)); + + const lockfileIntegrityPresent = !_this3.lockfile.hasEntriesExistWithoutIntegrity(); + const integrityBailout = lockfileIntegrityPresent || !_this3.config.autoAddIntegrity; + + if (match.integrityMatches && haveLockfile && lockfileClean && integrityBailout) { + _this3.reporter.success(_this3.reporter.lang('upToDate')); + return true; + } + + if (match.integrityFileMissing && haveLockfile) { + // Integrity file missing, force script installations + _this3.scripts.setForce(true); + return false; + } + + if (match.hardRefreshRequired) { + // e.g. node version doesn't match, force script installations + _this3.scripts.setForce(true); + return false; + } + + if (!patterns.length && !match.integrityFileMissing) { + _this3.reporter.success(_this3.reporter.lang('nothingToInstall')); + yield _this3.createEmptyManifestFolders(); + yield _this3.saveLockfileAndIntegrity(patterns, workspaceLayout); + return true; + } + + return false; + })(); + } + + /** + * Produce empty folders for all used root manifests. + */ + + createEmptyManifestFolders() { + var _this4 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + if (_this4.config.modulesFolder) { + // already created + return; + } + + for (var _iterator9 = _this4.rootManifestRegistries, _isArray9 = Array.isArray(_iterator9), _i9 = 0, _iterator9 = _isArray9 ? 
_iterator9 : _iterator9[Symbol.iterator]();;) { + var _ref10; + + if (_isArray9) { + if (_i9 >= _iterator9.length) break; + _ref10 = _iterator9[_i9++]; + } else { + _i9 = _iterator9.next(); + if (_i9.done) break; + _ref10 = _i9.value; + } + + const registryName = _ref10; + const folder = _this4.config.registries[registryName].folder; + + yield (_fs || _load_fs()).mkdirp(path.join(_this4.config.lockfileFolder, folder)); + } + })(); + } + + /** + * TODO description + */ + + markIgnored(patterns) { + for (var _iterator10 = patterns, _isArray10 = Array.isArray(_iterator10), _i10 = 0, _iterator10 = _isArray10 ? _iterator10 : _iterator10[Symbol.iterator]();;) { + var _ref11; + + if (_isArray10) { + if (_i10 >= _iterator10.length) break; + _ref11 = _iterator10[_i10++]; + } else { + _i10 = _iterator10.next(); + if (_i10.done) break; + _ref11 = _i10.value; + } + + const pattern = _ref11; + + const manifest = this.resolver.getStrictResolvedPattern(pattern); + const ref = manifest._reference; + invariant(ref, 'expected package reference'); + + // just mark the package as ignored. if the package is used by a required package, the hoister + // will take care of that. 
+ ref.ignore = true; + } + } + + /** + * helper method that gets only recent manifests + * used by global.ls command + */ + getFlattenedDeps() { + var _this5 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + var _ref12 = yield _this5.fetchRequestFromCwd(); + + const depRequests = _ref12.requests, + rawPatterns = _ref12.patterns; + + + yield _this5.resolver.init(depRequests, {}); + + const manifests = yield (_packageFetcher || _load_packageFetcher()).fetch(_this5.resolver.getManifests(), _this5.config); + _this5.resolver.updateManifests(manifests); + + return _this5.flatten(rawPatterns); + })(); + } + + /** + * TODO description + */ + + init() { + var _this6 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + _this6.checkUpdate(); + + // warn if we have a shrinkwrap + if (yield (_fs || _load_fs()).exists(path.join(_this6.config.lockfileFolder, (_constants || _load_constants()).NPM_SHRINKWRAP_FILENAME))) { + _this6.reporter.warn(_this6.reporter.lang('shrinkwrapWarning')); + } + + // warn if we have an npm lockfile + if (yield (_fs || _load_fs()).exists(path.join(_this6.config.lockfileFolder, (_constants || _load_constants()).NPM_LOCK_FILENAME))) { + _this6.reporter.warn(_this6.reporter.lang('npmLockfileWarning')); + } + + if (_this6.config.plugnplayEnabled) { + _this6.reporter.info(_this6.reporter.lang('plugnplaySuggestV2L1')); + _this6.reporter.info(_this6.reporter.lang('plugnplaySuggestV2L2')); + } + + let flattenedTopLevelPatterns = []; + const steps = []; + + var _ref13 = yield _this6.fetchRequestFromCwd(); + + const depRequests = _ref13.requests, + rawPatterns = _ref13.patterns, + ignorePatterns = _ref13.ignorePatterns, + workspaceLayout = _ref13.workspaceLayout, + manifest = _ref13.manifest; + + let topLevelPatterns = []; + + const artifacts = yield _this6.integrityChecker.getArtifacts(); + if (artifacts) { + _this6.linker.setArtifacts(artifacts); + 
_this6.scripts.setArtifacts(artifacts); + } + + if ((_packageCompatibility || _load_packageCompatibility()).shouldCheck(manifest, _this6.flags)) { + steps.push((() => { + var _ref14 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (curr, total) { + _this6.reporter.step(curr, total, _this6.reporter.lang('checkingManifest'), emoji.get('mag')); + yield _this6.checkCompatibility(); + }); + + return function (_x, _x2) { + return _ref14.apply(this, arguments); + }; + })()); + } + + const audit = new (_audit || _load_audit()).default(_this6.config, _this6.reporter, { groups: (_constants || _load_constants()).OWNED_DEPENDENCY_TYPES }); + let auditFoundProblems = false; + + steps.push(function (curr, total) { + return (0, (_hooks || _load_hooks()).callThroughHook)('resolveStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + _this6.reporter.step(curr, total, _this6.reporter.lang('resolvingPackages'), emoji.get('mag')); + yield _this6.resolver.init(_this6.prepareRequests(depRequests), { + isFlat: _this6.flags.flat, + isFrozen: _this6.flags.frozenLockfile, + workspaceLayout + }); + topLevelPatterns = _this6.preparePatterns(rawPatterns); + flattenedTopLevelPatterns = yield _this6.flatten(topLevelPatterns); + return { bailout: !_this6.flags.audit && (yield _this6.bailout(topLevelPatterns, workspaceLayout)) }; + })); + }); + + if (_this6.flags.audit) { + steps.push(function (curr, total) { + return (0, (_hooks || _load_hooks()).callThroughHook)('auditStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + _this6.reporter.step(curr, total, _this6.reporter.lang('auditRunning'), emoji.get('mag')); + if (_this6.flags.offline) { + _this6.reporter.warn(_this6.reporter.lang('auditOffline')); + return { bailout: false }; + } + const preparedManifests = yield _this6.prepareManifests(); + // $FlowFixMe - Flow considers `m` in the map operation to be "mixed", so does not recognize `m.object` + const 
mergedManifest = Object.assign({}, ...Object.values(preparedManifests).map(function (m) { + return m.object; + })); + const auditVulnerabilityCounts = yield audit.performAudit(mergedManifest, _this6.lockfile, _this6.resolver, _this6.linker, topLevelPatterns); + auditFoundProblems = auditVulnerabilityCounts.info || auditVulnerabilityCounts.low || auditVulnerabilityCounts.moderate || auditVulnerabilityCounts.high || auditVulnerabilityCounts.critical; + return { bailout: yield _this6.bailout(topLevelPatterns, workspaceLayout) }; + })); + }); + } + + steps.push(function (curr, total) { + return (0, (_hooks || _load_hooks()).callThroughHook)('fetchStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + _this6.markIgnored(ignorePatterns); + _this6.reporter.step(curr, total, _this6.reporter.lang('fetchingPackages'), emoji.get('truck')); + const manifests = yield (_packageFetcher || _load_packageFetcher()).fetch(_this6.resolver.getManifests(), _this6.config); + _this6.resolver.updateManifests(manifests); + yield (_packageCompatibility || _load_packageCompatibility()).check(_this6.resolver.getManifests(), _this6.config, _this6.flags.ignoreEngines); + })); + }); + + steps.push(function (curr, total) { + return (0, (_hooks || _load_hooks()).callThroughHook)('linkStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + // remove integrity hash to make this operation atomic + yield _this6.integrityChecker.removeIntegrityFile(); + _this6.reporter.step(curr, total, _this6.reporter.lang('linkingDependencies'), emoji.get('link')); + flattenedTopLevelPatterns = _this6.preparePatternsForLinking(flattenedTopLevelPatterns, manifest, _this6.config.lockfileFolder === _this6.config.cwd); + yield _this6.linker.init(flattenedTopLevelPatterns, workspaceLayout, { + linkDuplicates: _this6.flags.linkDuplicates, + ignoreOptional: _this6.flags.ignoreOptional + }); + })); + }); + + if (_this6.config.plugnplayEnabled) { + 
steps.push(function (curr, total) { + return (0, (_hooks || _load_hooks()).callThroughHook)('pnpStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + const pnpPath = `${_this6.config.lockfileFolder}/${(_constants || _load_constants()).PNP_FILENAME}`; + + const code = yield (0, (_generatePnpMap || _load_generatePnpMap()).generatePnpMap)(_this6.config, flattenedTopLevelPatterns, { + resolver: _this6.resolver, + reporter: _this6.reporter, + targetPath: pnpPath, + workspaceLayout + }); + + try { + const file = yield (_fs || _load_fs()).readFile(pnpPath); + if (file === code) { + return; + } + } catch (error) {} + + yield (_fs || _load_fs()).writeFile(pnpPath, code); + yield (_fs || _load_fs()).chmod(pnpPath, 0o755); + })); + }); + } + + steps.push(function (curr, total) { + return (0, (_hooks || _load_hooks()).callThroughHook)('buildStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + _this6.reporter.step(curr, total, _this6.flags.force ? 
_this6.reporter.lang('rebuildingPackages') : _this6.reporter.lang('buildingFreshPackages'), emoji.get('hammer')); + + if (_this6.config.ignoreScripts) { + _this6.reporter.warn(_this6.reporter.lang('ignoredScripts')); + } else { + yield _this6.scripts.init(flattenedTopLevelPatterns); + } + })); + }); + + if (_this6.flags.har) { + steps.push((() => { + var _ref21 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (curr, total) { + const formattedDate = new Date().toISOString().replace(/:/g, '-'); + const filename = `yarn-install_${formattedDate}.har`; + _this6.reporter.step(curr, total, _this6.reporter.lang('savingHar', filename), emoji.get('black_circle_for_record')); + yield _this6.config.requestManager.saveHar(filename); + }); + + return function (_x3, _x4) { + return _ref21.apply(this, arguments); + }; + })()); + } + + if (yield _this6.shouldClean()) { + steps.push((() => { + var _ref22 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (curr, total) { + _this6.reporter.step(curr, total, _this6.reporter.lang('cleaningModules'), emoji.get('recycle')); + yield (0, (_autoclean || _load_autoclean()).clean)(_this6.config, _this6.reporter); + }); + + return function (_x5, _x6) { + return _ref22.apply(this, arguments); + }; + })()); + } + + let currentStep = 0; + for (var _iterator11 = steps, _isArray11 = Array.isArray(_iterator11), _i11 = 0, _iterator11 = _isArray11 ? 
_iterator11 : _iterator11[Symbol.iterator]();;) { + var _ref23; + + if (_isArray11) { + if (_i11 >= _iterator11.length) break; + _ref23 = _iterator11[_i11++]; + } else { + _i11 = _iterator11.next(); + if (_i11.done) break; + _ref23 = _i11.value; + } + + const step = _ref23; + + const stepResult = yield step(++currentStep, steps.length); + if (stepResult && stepResult.bailout) { + if (_this6.flags.audit) { + audit.summary(); + } + if (auditFoundProblems) { + _this6.reporter.warn(_this6.reporter.lang('auditRunAuditForDetails')); + } + _this6.maybeOutputUpdate(); + return flattenedTopLevelPatterns; + } + } + + // fin! + if (_this6.flags.audit) { + audit.summary(); + } + if (auditFoundProblems) { + _this6.reporter.warn(_this6.reporter.lang('auditRunAuditForDetails')); + } + yield _this6.saveLockfileAndIntegrity(topLevelPatterns, workspaceLayout); + yield _this6.persistChanges(); + _this6.maybeOutputUpdate(); + _this6.config.requestManager.clearCache(); + return flattenedTopLevelPatterns; + })(); + } + + checkCompatibility() { + var _this7 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + var _ref24 = yield _this7.fetchRequestFromCwd(); + + const manifest = _ref24.manifest; + + yield (_packageCompatibility || _load_packageCompatibility()).checkOne(manifest, _this7.config, _this7.flags.ignoreEngines); + })(); + } + + persistChanges() { + var _this8 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + // get all the different registry manifests in this folder + const manifests = yield _this8.config.getRootManifests(); + + if (yield _this8.applyChanges(manifests)) { + yield _this8.config.saveRootManifests(manifests); + } + })(); + } + + applyChanges(manifests) { + let hasChanged = false; + + if (this.config.plugnplayPersist) { + const object = manifests.npm.object; + + + if (typeof object.installConfig !== 'object') { + object.installConfig = {}; + } + + if 
(this.config.plugnplayEnabled && object.installConfig.pnp !== true) { + object.installConfig.pnp = true; + hasChanged = true; + } else if (!this.config.plugnplayEnabled && typeof object.installConfig.pnp !== 'undefined') { + delete object.installConfig.pnp; + hasChanged = true; + } + + if (Object.keys(object.installConfig).length === 0) { + delete object.installConfig; + } + } + + return Promise.resolve(hasChanged); + } + + /** + * Check if we should run the cleaning step. + */ + + shouldClean() { + return (_fs || _load_fs()).exists(path.join(this.config.lockfileFolder, (_constants || _load_constants()).CLEAN_FILENAME)); + } + + /** + * TODO + */ + + flatten(patterns) { + var _this9 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + if (!_this9.flags.flat) { + return patterns; + } + + const flattenedPatterns = []; + + for (var _iterator12 = _this9.resolver.getAllDependencyNamesByLevelOrder(patterns), _isArray12 = Array.isArray(_iterator12), _i12 = 0, _iterator12 = _isArray12 ? 
_iterator12 : _iterator12[Symbol.iterator]();;) { + var _ref25; + + if (_isArray12) { + if (_i12 >= _iterator12.length) break; + _ref25 = _iterator12[_i12++]; + } else { + _i12 = _iterator12.next(); + if (_i12.done) break; + _ref25 = _i12.value; + } + + const name = _ref25; + + const infos = _this9.resolver.getAllInfoForPackageName(name).filter(function (manifest) { + const ref = manifest._reference; + invariant(ref, 'expected package reference'); + return !ref.ignore; + }); + + if (infos.length === 0) { + continue; + } + + if (infos.length === 1) { + // single version of this package + // take out a single pattern as multiple patterns may have resolved to this package + flattenedPatterns.push(_this9.resolver.patternsByPackage[name][0]); + continue; + } + + const options = infos.map(function (info) { + const ref = info._reference; + invariant(ref, 'expected reference'); + return { + // TODO `and is required by {PARENT}`, + name: _this9.reporter.lang('manualVersionResolutionOption', ref.patterns.join(', '), info.version), + + value: info.version + }; + }); + const versions = infos.map(function (info) { + return info.version; + }); + let version; + + const resolutionVersion = _this9.resolutions[name]; + if (resolutionVersion && versions.indexOf(resolutionVersion) >= 0) { + // use json `resolution` version + version = resolutionVersion; + } else { + version = yield _this9.reporter.select(_this9.reporter.lang('manualVersionResolution', name), _this9.reporter.lang('answer'), options); + _this9.resolutions[name] = version; + } + + flattenedPatterns.push(_this9.resolver.collapseAllVersionsOfPackage(name, version)); + } + + // save resolutions to their appropriate root manifest + if (Object.keys(_this9.resolutions).length) { + const manifests = yield _this9.config.getRootManifests(); + + for (const name in _this9.resolutions) { + const version = _this9.resolutions[name]; + + const patterns = _this9.resolver.patternsByPackage[name]; + if (!patterns) { + continue; + } + + 
let manifest; + for (var _iterator13 = patterns, _isArray13 = Array.isArray(_iterator13), _i13 = 0, _iterator13 = _isArray13 ? _iterator13 : _iterator13[Symbol.iterator]();;) { + var _ref26; + + if (_isArray13) { + if (_i13 >= _iterator13.length) break; + _ref26 = _iterator13[_i13++]; + } else { + _i13 = _iterator13.next(); + if (_i13.done) break; + _ref26 = _i13.value; + } + + const pattern = _ref26; + + manifest = _this9.resolver.getResolvedPattern(pattern); + if (manifest) { + break; + } + } + invariant(manifest, 'expected manifest'); + + const ref = manifest._reference; + invariant(ref, 'expected reference'); + + const object = manifests[ref.registry].object; + object.resolutions = object.resolutions || {}; + object.resolutions[name] = version; + } + + yield _this9.config.saveRootManifests(manifests); + } + + return flattenedPatterns; + })(); + } + + /** + * Remove offline tarballs that are no longer required + */ + + pruneOfflineMirror(lockfile) { + var _this10 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + const mirror = _this10.config.getOfflineMirrorPath(); + if (!mirror) { + return; + } + + const requiredTarballs = new Set(); + for (const dependency in lockfile) { + const resolved = lockfile[dependency].resolved; + if (resolved) { + const basename = path.basename(resolved.split('#')[0]); + if (dependency[0] === '@' && basename[0] !== '@') { + requiredTarballs.add(`${dependency.split('/')[0]}-${basename}`); + } + requiredTarballs.add(basename); + } + } + + const mirrorFiles = yield (_fs || _load_fs()).walk(mirror); + for (var _iterator14 = mirrorFiles, _isArray14 = Array.isArray(_iterator14), _i14 = 0, _iterator14 = _isArray14 ? 
_iterator14 : _iterator14[Symbol.iterator]();;) { + var _ref27; + + if (_isArray14) { + if (_i14 >= _iterator14.length) break; + _ref27 = _iterator14[_i14++]; + } else { + _i14 = _iterator14.next(); + if (_i14.done) break; + _ref27 = _i14.value; + } + + const file = _ref27; + + const isTarball = path.extname(file.basename) === '.tgz'; + // if using experimental-pack-script-packages-in-mirror flag, don't unlink prebuilt packages + const hasPrebuiltPackage = file.relative.startsWith('prebuilt/'); + if (isTarball && !hasPrebuiltPackage && !requiredTarballs.has(file.basename)) { + yield (_fs || _load_fs()).unlink(file.absolute); + } + } + })(); + } + + /** + * Save updated integrity and lockfiles. + */ + + saveLockfileAndIntegrity(patterns, workspaceLayout) { + var _this11 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + const resolvedPatterns = {}; + Object.keys(_this11.resolver.patterns).forEach(function (pattern) { + if (!workspaceLayout || !workspaceLayout.getManifestByPattern(pattern)) { + resolvedPatterns[pattern] = _this11.resolver.patterns[pattern]; + } + }); + + // TODO this code is duplicated in a few places, need a common way to filter out workspace patterns from lockfile + patterns = patterns.filter(function (p) { + return !workspaceLayout || !workspaceLayout.getManifestByPattern(p); + }); + + const lockfileBasedOnResolver = _this11.lockfile.getLockfile(resolvedPatterns); + + if (_this11.config.pruneOfflineMirror) { + yield _this11.pruneOfflineMirror(lockfileBasedOnResolver); + } + + // write integrity hash + if (!_this11.config.plugnplayEnabled) { + yield _this11.integrityChecker.save(patterns, lockfileBasedOnResolver, _this11.flags, workspaceLayout, _this11.scripts.getArtifacts()); + } + + // --no-lockfile or --pure-lockfile or --frozen-lockfile + if (_this11.flags.lockfile === false || _this11.flags.pureLockfile || _this11.flags.frozenLockfile) { + return; + } + + const lockFileHasAllPatterns = 
patterns.every(function (p) { + return _this11.lockfile.getLocked(p); + }); + const lockfilePatternsMatch = Object.keys(_this11.lockfile.cache || {}).every(function (p) { + return lockfileBasedOnResolver[p]; + }); + const resolverPatternsAreSameAsInLockfile = Object.keys(lockfileBasedOnResolver).every(function (pattern) { + const manifest = _this11.lockfile.getLocked(pattern); + return manifest && manifest.resolved === lockfileBasedOnResolver[pattern].resolved && deepEqual(manifest.prebuiltVariants, lockfileBasedOnResolver[pattern].prebuiltVariants); + }); + const integrityPatternsAreSameAsInLockfile = Object.keys(lockfileBasedOnResolver).every(function (pattern) { + const existingIntegrityInfo = lockfileBasedOnResolver[pattern].integrity; + if (!existingIntegrityInfo) { + // if this entry does not have an integrity, no need to re-write the lockfile because of it + return true; + } + const manifest = _this11.lockfile.getLocked(pattern); + if (manifest && manifest.integrity) { + const manifestIntegrity = ssri.stringify(manifest.integrity); + return manifestIntegrity === existingIntegrityInfo; + } + return false; + }); + + // remove command is followed by install with force, lockfile will be rewritten in any case then + if (!_this11.flags.force && _this11.lockfile.parseResultType === 'success' && lockFileHasAllPatterns && lockfilePatternsMatch && resolverPatternsAreSameAsInLockfile && integrityPatternsAreSameAsInLockfile && patterns.length) { + return; + } + + // build lockfile location + const loc = path.join(_this11.config.lockfileFolder, (_constants || _load_constants()).LOCKFILE_FILENAME); + + // write lockfile + const lockSource = (0, (_lockfile2 || _load_lockfile2()).stringify)(lockfileBasedOnResolver, false, _this11.config.enableLockfileVersions); + yield (_fs || _load_fs()).writeFilePreservingEol(loc, lockSource); + + _this11._logSuccessSaveLockfile(); + })(); + } + + _logSuccessSaveLockfile() { + this.reporter.success(this.reporter.lang('savedLockfile')); + 
} + + /** + * Load the dependency graph of the current install. Only does package resolving and wont write to the cwd. + */ + hydrate(ignoreUnusedPatterns) { + var _this12 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + const request = yield _this12.fetchRequestFromCwd([], ignoreUnusedPatterns); + const depRequests = request.requests, + rawPatterns = request.patterns, + ignorePatterns = request.ignorePatterns, + workspaceLayout = request.workspaceLayout; + + + yield _this12.resolver.init(depRequests, { + isFlat: _this12.flags.flat, + isFrozen: _this12.flags.frozenLockfile, + workspaceLayout + }); + yield _this12.flatten(rawPatterns); + _this12.markIgnored(ignorePatterns); + + // fetch packages, should hit cache most of the time + const manifests = yield (_packageFetcher || _load_packageFetcher()).fetch(_this12.resolver.getManifests(), _this12.config); + _this12.resolver.updateManifests(manifests); + yield (_packageCompatibility || _load_packageCompatibility()).check(_this12.resolver.getManifests(), _this12.config, _this12.flags.ignoreEngines); + + // expand minimal manifests + for (var _iterator15 = _this12.resolver.getManifests(), _isArray15 = Array.isArray(_iterator15), _i15 = 0, _iterator15 = _isArray15 ? 
_iterator15 : _iterator15[Symbol.iterator]();;) { + var _ref28; + + if (_isArray15) { + if (_i15 >= _iterator15.length) break; + _ref28 = _iterator15[_i15++]; + } else { + _i15 = _iterator15.next(); + if (_i15.done) break; + _ref28 = _i15.value; + } + + const manifest = _ref28; + + const ref = manifest._reference; + invariant(ref, 'expected reference'); + const type = ref.remote.type; + // link specifier won't ever hit cache + + let loc = ''; + if (type === 'link') { + continue; + } else if (type === 'workspace') { + if (!ref.remote.reference) { + continue; + } + loc = ref.remote.reference; + } else { + loc = _this12.config.generateModuleCachePath(ref); + } + const newPkg = yield _this12.config.readManifest(loc); + yield _this12.resolver.updateManifest(ref, newPkg); + } + + return request; + })(); + } + + /** + * Check for updates every day and output a nag message if there's a newer version. + */ + + checkUpdate() { + if (this.config.nonInteractive) { + // don't show upgrade dialog on CI or non-TTY terminals + return; + } + + // don't check if disabled + if (this.config.getOption('disable-self-update-check')) { + return; + } + + // only check for updates once a day + const lastUpdateCheck = Number(this.config.getOption('lastUpdateCheck')) || 0; + if (lastUpdateCheck && Date.now() - lastUpdateCheck < ONE_DAY) { + return; + } + + // don't bug for updates on tagged releases + if ((_yarnVersion || _load_yarnVersion()).version.indexOf('-') >= 0) { + return; + } + + this._checkUpdate().catch(() => { + // swallow errors + }); + } + + _checkUpdate() { + var _this13 = this; + + return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { + let latestVersion = yield _this13.config.requestManager.request({ + url: (_constants || _load_constants()).SELF_UPDATE_VERSION_URL + }); + invariant(typeof latestVersion === 'string', 'expected string'); + latestVersion = latestVersion.trim(); + if (!semver.valid(latestVersion)) { + return; + } + + // ensure we 
only check for updates periodically + _this13.config.registries.yarn.saveHomeConfig({ + lastUpdateCheck: Date.now() + }); + + if (semver.gt(latestVersion, (_yarnVersion || _load_yarnVersion()).version)) { + const installationMethod = yield (0, (_yarnVersion || _load_yarnVersion()).getInstallationMethod)(); + _this13.maybeOutputUpdate = function () { + _this13.reporter.warn(_this13.reporter.lang('yarnOutdated', latestVersion, (_yarnVersion || _load_yarnVersion()).version)); + + const command = getUpdateCommand(installationMethod); + if (command) { + _this13.reporter.info(_this13.reporter.lang('yarnOutdatedCommand')); + _this13.reporter.command(command); + } else { + const installer = getUpdateInstaller(installationMethod); + if (installer) { + _this13.reporter.info(_this13.reporter.lang('yarnOutdatedInstaller', installer)); + } + } + }; + } + })(); + } + + /** + * Method to override with a possible upgrade message. + */ + + maybeOutputUpdate() {} +} + +exports.Install = Install; +function hasWrapper(commander, args) { + return true; +} + +function setFlags(commander) { + commander.description('Yarn install is used to install all dependencies for a project.'); + commander.usage('install [flags]'); + commander.option('-A, --audit', 'Run vulnerability audit on installed packages'); + commander.option('-g, --global', 'DEPRECATED'); + commander.option('-S, --save', 'DEPRECATED - save package to your `dependencies`'); + commander.option('-D, --save-dev', 'DEPRECATED - save package to your `devDependencies`'); + commander.option('-P, --save-peer', 'DEPRECATED - save package to your `peerDependencies`'); + commander.option('-O, --save-optional', 'DEPRECATED - save package to your `optionalDependencies`'); + commander.option('-E, --save-exact', 'DEPRECATED'); + commander.option('-T, --save-tilde', 'DEPRECATED'); +} + +/***/ }), +/* 35 */ +/***/ (function(module, exports, __webpack_require__) { + +var isObject = __webpack_require__(52); +module.exports = function (it) { + if 
(!isObject(it)) throw TypeError(it + ' is not an object!'); + return it; +}; + + +/***/ }), +/* 36 */ +/***/ (function(module, __webpack_exports__, __webpack_require__) { + +"use strict"; +/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return SubjectSubscriber; }); +/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Subject; }); +/* unused harmony export AnonymousSubject */ +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_tslib__ = __webpack_require__(1); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__Observable__ = __webpack_require__(12); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__Subscriber__ = __webpack_require__(7); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__Subscription__ = __webpack_require__(25); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__ = __webpack_require__(189); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__SubjectSubscription__ = __webpack_require__(422); +/* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__internal_symbol_rxSubscriber__ = __webpack_require__(321); +/** PURE_IMPORTS_START tslib,_Observable,_Subscriber,_Subscription,_util_ObjectUnsubscribedError,_SubjectSubscription,_internal_symbol_rxSubscriber PURE_IMPORTS_END */ + + + + + + + +var SubjectSubscriber = /*@__PURE__*/ (function (_super) { + __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](SubjectSubscriber, _super); + function SubjectSubscriber(destination) { + var _this = _super.call(this, destination) || this; + _this.destination = destination; + return _this; + } + return SubjectSubscriber; +}(__WEBPACK_IMPORTED_MODULE_2__Subscriber__["a" /* Subscriber */])); + +var Subject = /*@__PURE__*/ (function (_super) { + __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](Subject, _super); + function Subject() { + var _this = _super.call(this) || this; + _this.observers = []; + _this.closed = false; 
+ _this.isStopped = false; + _this.hasError = false; + _this.thrownError = null; + return _this; + } + Subject.prototype[__WEBPACK_IMPORTED_MODULE_6__internal_symbol_rxSubscriber__["a" /* rxSubscriber */]] = function () { + return new SubjectSubscriber(this); + }; + Subject.prototype.lift = function (operator) { + var subject = new AnonymousSubject(this, this); + subject.operator = operator; + return subject; + }; + Subject.prototype.next = function (value) { + if (this.closed) { + throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); + } + if (!this.isStopped) { + var observers = this.observers; + var len = observers.length; + var copy = observers.slice(); + for (var i = 0; i < len; i++) { + copy[i].next(value); + } + } + }; + Subject.prototype.error = function (err) { + if (this.closed) { + throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); + } + this.hasError = true; + this.thrownError = err; + this.isStopped = true; + var observers = this.observers; + var len = observers.length; + var copy = observers.slice(); + for (var i = 0; i < len; i++) { + copy[i].error(err); + } + this.observers.length = 0; + }; + Subject.prototype.complete = function () { + if (this.closed) { + throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); + } + this.isStopped = true; + var observers = this.observers; + var len = observers.length; + var copy = observers.slice(); + for (var i = 0; i < len; i++) { + copy[i].complete(); + } + this.observers.length = 0; + }; + Subject.prototype.unsubscribe = function () { + this.isStopped = true; + this.closed = true; + this.observers = null; + }; + Subject.prototype._trySubscribe = function (subscriber) { + if (this.closed) { + throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); + } + else { + return 
_super.prototype._trySubscribe.call(this, subscriber); + } + }; + Subject.prototype._subscribe = function (subscriber) { + if (this.closed) { + throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); + } + else if (this.hasError) { + subscriber.error(this.thrownError); + return __WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */].EMPTY; + } + else if (this.isStopped) { + subscriber.complete(); + return __WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */].EMPTY; + } + else { + this.observers.push(subscriber); + return new __WEBPACK_IMPORTED_MODULE_5__SubjectSubscription__["a" /* SubjectSubscription */](this, subscriber); + } + }; + Subject.prototype.asObservable = function () { + var observable = new __WEBPACK_IMPORTED_MODULE_1__Observable__["a" /* Observable */](); + observable.source = this; + return observable; + }; + Subject.create = function (destination, source) { + return new AnonymousSubject(destination, source); + }; + return Subject; +}(__WEBPACK_IMPORTED_MODULE_1__Observable__["a" /* Observable */])); + +var AnonymousSubject = /*@__PURE__*/ (function (_super) { + __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](AnonymousSubject, _super); + function AnonymousSubject(destination, source) { + var _this = _super.call(this) || this; + _this.destination = destination; + _this.source = source; + return _this; + } + AnonymousSubject.prototype.next = function (value) { + var destination = this.destination; + if (destination && destination.next) { + destination.next(value); + } + }; + AnonymousSubject.prototype.error = function (err) { + var destination = this.destination; + if (destination && destination.error) { + this.destination.error(err); + } + }; + AnonymousSubject.prototype.complete = function () { + var destination = this.destination; + if (destination && destination.complete) { + this.destination.complete(); + } + }; + AnonymousSubject.prototype._subscribe = function 
(subscriber) { + var source = this.source; + if (source) { + return this.source.subscribe(subscriber); + } + else { + return __WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */].EMPTY; + } + }; + return AnonymousSubject; +}(Subject)); + +//# sourceMappingURL=Subject.js.map + + +/***/ }), +/* 37 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +Object.defineProperty(exports, "__esModule", { + value: true +}); +exports.normalizePattern = normalizePattern; + +/** + * Explode and normalize a pattern into its name and range. + */ + +function normalizePattern(pattern) { + let hasVersion = false; + let range = 'latest'; + let name = pattern; + + // if we're a scope then remove the @ and add it back later + let isScoped = false; + if (name[0] === '@') { + isScoped = true; + name = name.slice(1); + } + + // take first part as the name + const parts = name.split('@'); + if (parts.length > 1) { + name = parts.shift(); + range = parts.join('@'); + + if (range) { + hasVersion = true; + } else { + range = '*'; + } + } + + // add back @ scope suffix + if (isScoped) { + name = `@${name}`; + } + + return { name, range, hasVersion }; +} + +/***/ }), +/* 38 */ +/***/ (function(module, exports, __webpack_require__) { + +/* WEBPACK VAR INJECTION */(function(module) {var __WEBPACK_AMD_DEFINE_RESULT__;/** + * @license + * Lodash + * Copyright JS Foundation and other contributors + * Released under MIT license + * Based on Underscore.js 1.8.3 + * Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors + */ +;(function() { + + /** Used as a safe reference for `undefined` in pre-ES5 environments. */ + var undefined; + + /** Used as the semantic version number. */ + var VERSION = '4.17.10'; + + /** Used as the size to enable large array optimizations. */ + var LARGE_ARRAY_SIZE = 200; + + /** Error message constants. */ + var CORE_ERROR_TEXT = 'Unsupported core-js use. 
Try https://npms.io/search?q=ponyfill.', + FUNC_ERROR_TEXT = 'Expected a function'; + + /** Used to stand-in for `undefined` hash values. */ + var HASH_UNDEFINED = '__lodash_hash_undefined__'; + + /** Used as the maximum memoize cache size. */ + var MAX_MEMOIZE_SIZE = 500; + + /** Used as the internal argument placeholder. */ + var PLACEHOLDER = '__lodash_placeholder__'; + + /** Used to compose bitmasks for cloning. */ + var CLONE_DEEP_FLAG = 1, + CLONE_FLAT_FLAG = 2, + CLONE_SYMBOLS_FLAG = 4; + + /** Used to compose bitmasks for value comparisons. */ + var COMPARE_PARTIAL_FLAG = 1, + COMPARE_UNORDERED_FLAG = 2; + + /** Used to compose bitmasks for function metadata. */ + var WRAP_BIND_FLAG = 1, + WRAP_BIND_KEY_FLAG = 2, + WRAP_CURRY_BOUND_FLAG = 4, + WRAP_CURRY_FLAG = 8, + WRAP_CURRY_RIGHT_FLAG = 16, + WRAP_PARTIAL_FLAG = 32, + WRAP_PARTIAL_RIGHT_FLAG = 64, + WRAP_ARY_FLAG = 128, + WRAP_REARG_FLAG = 256, + WRAP_FLIP_FLAG = 512; + + /** Used as default options for `_.truncate`. */ + var DEFAULT_TRUNC_LENGTH = 30, + DEFAULT_TRUNC_OMISSION = '...'; + + /** Used to detect hot functions by number of calls within a span of milliseconds. */ + var HOT_COUNT = 800, + HOT_SPAN = 16; + + /** Used to indicate the type of lazy iteratees. */ + var LAZY_FILTER_FLAG = 1, + LAZY_MAP_FLAG = 2, + LAZY_WHILE_FLAG = 3; + + /** Used as references for various `Number` constants. */ + var INFINITY = 1 / 0, + MAX_SAFE_INTEGER = 9007199254740991, + MAX_INTEGER = 1.7976931348623157e+308, + NAN = 0 / 0; + + /** Used as references for the maximum length and index of an array. */ + var MAX_ARRAY_LENGTH = 4294967295, + MAX_ARRAY_INDEX = MAX_ARRAY_LENGTH - 1, + HALF_MAX_ARRAY_LENGTH = MAX_ARRAY_LENGTH >>> 1; + + /** Used to associate wrap methods with their bit flags. 
*/ + var wrapFlags = [ + ['ary', WRAP_ARY_FLAG], + ['bind', WRAP_BIND_FLAG], + ['bindKey', WRAP_BIND_KEY_FLAG], + ['curry', WRAP_CURRY_FLAG], + ['curryRight', WRAP_CURRY_RIGHT_FLAG], + ['flip', WRAP_FLIP_FLAG], + ['partial', WRAP_PARTIAL_FLAG], + ['partialRight', WRAP_PARTIAL_RIGHT_FLAG], + ['rearg', WRAP_REARG_FLAG] + ]; + + /** `Object#toString` result references. */ + var argsTag = '[object Arguments]', + arrayTag = '[object Array]', + asyncTag = '[object AsyncFunction]', + boolTag = '[object Boolean]', + dateTag = '[object Date]', + domExcTag = '[object DOMException]', + errorTag = '[object Error]', + funcTag = '[object Function]', + genTag = '[object GeneratorFunction]', + mapTag = '[object Map]', + numberTag = '[object Number]', + nullTag = '[object Null]', + objectTag = '[object Object]', + promiseTag = '[object Promise]', + proxyTag = '[object Proxy]', + regexpTag = '[object RegExp]', + setTag = '[object Set]', + stringTag = '[object String]', + symbolTag = '[object Symbol]', + undefinedTag = '[object Undefined]', + weakMapTag = '[object WeakMap]', + weakSetTag = '[object WeakSet]'; + + var arrayBufferTag = '[object ArrayBuffer]', + dataViewTag = '[object DataView]', + float32Tag = '[object Float32Array]', + float64Tag = '[object Float64Array]', + int8Tag = '[object Int8Array]', + int16Tag = '[object Int16Array]', + int32Tag = '[object Int32Array]', + uint8Tag = '[object Uint8Array]', + uint8ClampedTag = '[object Uint8ClampedArray]', + uint16Tag = '[object Uint16Array]', + uint32Tag = '[object Uint32Array]'; + + /** Used to match empty string literals in compiled template source. */ + var reEmptyStringLeading = /\b__p \+= '';/g, + reEmptyStringMiddle = /\b(__p \+=) '' \+/g, + reEmptyStringTrailing = /(__e\(.*?\)|\b__t\)) \+\n'';/g; + + /** Used to match HTML entities and HTML characters. 
*/ + var reEscapedHtml = /&(?:amp|lt|gt|quot|#39);/g, + reUnescapedHtml = /[&<>"']/g, + reHasEscapedHtml = RegExp(reEscapedHtml.source), + reHasUnescapedHtml = RegExp(reUnescapedHtml.source); + + /** Used to match template delimiters. */ + var reEscape = /<%-([\s\S]+?)%>/g, + reEvaluate = /<%([\s\S]+?)%>/g, + reInterpolate = /<%=([\s\S]+?)%>/g; + + /** Used to match property names within property paths. */ + var reIsDeepProp = /\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/, + reIsPlainProp = /^\w*$/, + rePropName = /[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g; + + /** + * Used to match `RegExp` + * [syntax characters](http://ecma-international.org/ecma-262/7.0/#sec-patterns). + */ + var reRegExpChar = /[\\^$.*+?()[\]{}|]/g, + reHasRegExpChar = RegExp(reRegExpChar.source); + + /** Used to match leading and trailing whitespace. */ + var reTrim = /^\s+|\s+$/g, + reTrimStart = /^\s+/, + reTrimEnd = /\s+$/; + + /** Used to match wrap detail comments. */ + var reWrapComment = /\{(?:\n\/\* \[wrapped with .+\] \*\/)?\n?/, + reWrapDetails = /\{\n\/\* \[wrapped with (.+)\] \*/, + reSplitDetails = /,? & /; + + /** Used to match words composed of alphanumeric characters. */ + var reAsciiWord = /[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g; + + /** Used to match backslashes in property paths. */ + var reEscapeChar = /\\(\\)?/g; + + /** + * Used to match + * [ES template delimiters](http://ecma-international.org/ecma-262/7.0/#sec-template-literal-lexical-components). + */ + var reEsTemplate = /\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g; + + /** Used to match `RegExp` flags from their coerced string values. */ + var reFlags = /\w*$/; + + /** Used to detect bad signed hexadecimal string values. */ + var reIsBadHex = /^[-+]0x[0-9a-f]+$/i; + + /** Used to detect binary string values. */ + var reIsBinary = /^0b[01]+$/i; + + /** Used to detect host constructors (Safari). 
*/ + var reIsHostCtor = /^\[object .+?Constructor\]$/; + + /** Used to detect octal string values. */ + var reIsOctal = /^0o[0-7]+$/i; + + /** Used to detect unsigned integer values. */ + var reIsUint = /^(?:0|[1-9]\d*)$/; + + /** Used to match Latin Unicode letters (excluding mathematical operators). */ + var reLatin = /[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g; + + /** Used to ensure capturing order of template delimiters. */ + var reNoMatch = /($^)/; + + /** Used to match unescaped characters in compiled string literals. */ + var reUnescapedString = /['\n\r\u2028\u2029\\]/g; + + /** Used to compose unicode character classes. */ + var rsAstralRange = '\\ud800-\\udfff', + rsComboMarksRange = '\\u0300-\\u036f', + reComboHalfMarksRange = '\\ufe20-\\ufe2f', + rsComboSymbolsRange = '\\u20d0-\\u20ff', + rsComboRange = rsComboMarksRange + reComboHalfMarksRange + rsComboSymbolsRange, + rsDingbatRange = '\\u2700-\\u27bf', + rsLowerRange = 'a-z\\xdf-\\xf6\\xf8-\\xff', + rsMathOpRange = '\\xac\\xb1\\xd7\\xf7', + rsNonCharRange = '\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf', + rsPunctuationRange = '\\u2000-\\u206f', + rsSpaceRange = ' \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000', + rsUpperRange = 'A-Z\\xc0-\\xd6\\xd8-\\xde', + rsVarRange = '\\ufe0e\\ufe0f', + rsBreakRange = rsMathOpRange + rsNonCharRange + rsPunctuationRange + rsSpaceRange; + + /** Used to compose unicode capture groups. 
*/ + var rsApos = "['\u2019]", + rsAstral = '[' + rsAstralRange + ']', + rsBreak = '[' + rsBreakRange + ']', + rsCombo = '[' + rsComboRange + ']', + rsDigits = '\\d+', + rsDingbat = '[' + rsDingbatRange + ']', + rsLower = '[' + rsLowerRange + ']', + rsMisc = '[^' + rsAstralRange + rsBreakRange + rsDigits + rsDingbatRange + rsLowerRange + rsUpperRange + ']', + rsFitz = '\\ud83c[\\udffb-\\udfff]', + rsModifier = '(?:' + rsCombo + '|' + rsFitz + ')', + rsNonAstral = '[^' + rsAstralRange + ']', + rsRegional = '(?:\\ud83c[\\udde6-\\uddff]){2}', + rsSurrPair = '[\\ud800-\\udbff][\\udc00-\\udfff]', + rsUpper = '[' + rsUpperRange + ']', + rsZWJ = '\\u200d'; + + /** Used to compose unicode regexes. */ + var rsMiscLower = '(?:' + rsLower + '|' + rsMisc + ')', + rsMiscUpper = '(?:' + rsUpper + '|' + rsMisc + ')', + rsOptContrLower = '(?:' + rsApos + '(?:d|ll|m|re|s|t|ve))?', + rsOptContrUpper = '(?:' + rsApos + '(?:D|LL|M|RE|S|T|VE))?', + reOptMod = rsModifier + '?', + rsOptVar = '[' + rsVarRange + ']?', + rsOptJoin = '(?:' + rsZWJ + '(?:' + [rsNonAstral, rsRegional, rsSurrPair].join('|') + ')' + rsOptVar + reOptMod + ')*', + rsOrdLower = '\\d*(?:1st|2nd|3rd|(?![123])\\dth)(?=\\b|[A-Z_])', + rsOrdUpper = '\\d*(?:1ST|2ND|3RD|(?![123])\\dTH)(?=\\b|[a-z_])', + rsSeq = rsOptVar + reOptMod + rsOptJoin, + rsEmoji = '(?:' + [rsDingbat, rsRegional, rsSurrPair].join('|') + ')' + rsSeq, + rsSymbol = '(?:' + [rsNonAstral + rsCombo + '?', rsCombo, rsRegional, rsSurrPair, rsAstral].join('|') + ')'; + + /** Used to match apostrophes. */ + var reApos = RegExp(rsApos, 'g'); + + /** + * Used to match [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks) and + * [combining diacritical marks for symbols](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks_for_Symbols). + */ + var reComboMark = RegExp(rsCombo, 'g'); + + /** Used to match [string symbols](https://mathiasbynens.be/notes/javascript-unicode). 
*/ + var reUnicode = RegExp(rsFitz + '(?=' + rsFitz + ')|' + rsSymbol + rsSeq, 'g'); + + /** Used to match complex or compound words. */ + var reUnicodeWord = RegExp([ + rsUpper + '?' + rsLower + '+' + rsOptContrLower + '(?=' + [rsBreak, rsUpper, '$'].join('|') + ')', + rsMiscUpper + '+' + rsOptContrUpper + '(?=' + [rsBreak, rsUpper + rsMiscLower, '$'].join('|') + ')', + rsUpper + '?' + rsMiscLower + '+' + rsOptContrLower, + rsUpper + '+' + rsOptContrUpper, + rsOrdUpper, + rsOrdLower, + rsDigits, + rsEmoji + ].join('|'), 'g'); + + /** Used to detect strings with [zero-width joiners or code points from the astral planes](http://eev.ee/blog/2015/09/12/dark-corners-of-unicode/). */ + var reHasUnicode = RegExp('[' + rsZWJ + rsAstralRange + rsComboRange + rsVarRange + ']'); + + /** Used to detect strings that need a more robust regexp to match words. */ + var reHasUnicodeWord = /[a-z][A-Z]|[A-Z]{2,}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/; + + /** Used to assign default `context` object properties. */ + var contextProps = [ + 'Array', 'Buffer', 'DataView', 'Date', 'Error', 'Float32Array', 'Float64Array', + 'Function', 'Int8Array', 'Int16Array', 'Int32Array', 'Map', 'Math', 'Object', + 'Promise', 'RegExp', 'Set', 'String', 'Symbol', 'TypeError', 'Uint8Array', + 'Uint8ClampedArray', 'Uint16Array', 'Uint32Array', 'WeakMap', + '_', 'clearTimeout', 'isFinite', 'parseInt', 'setTimeout' + ]; + + /** Used to make template sourceURLs easier to identify. */ + var templateCounter = -1; + + /** Used to identify `toStringTag` values of typed arrays. 
*/ + var typedArrayTags = {}; + typedArrayTags[float32Tag] = typedArrayTags[float64Tag] = + typedArrayTags[int8Tag] = typedArrayTags[int16Tag] = + typedArrayTags[int32Tag] = typedArrayTags[uint8Tag] = + typedArrayTags[uint8ClampedTag] = typedArrayTags[uint16Tag] = + typedArrayTags[uint32Tag] = true; + typedArrayTags[argsTag] = typedArrayTags[arrayTag] = + typedArrayTags[arrayBufferTag] = typedArrayTags[boolTag] = + typedArrayTags[dataViewTag] = typedArrayTags[dateTag] = + typedArrayTags[errorTag] = typedArrayTags[funcTag] = + typedArrayTags[mapTag] = typedArrayTags[numberTag] = + typedArrayTags[objectTag] = typedArrayTags[regexpTag] = + typedArrayTags[setTag] = typedArrayTags[stringTag] = + typedArrayTags[weakMapTag] = false; + + /** Used to identify `toStringTag` values supported by `_.clone`. */ + var cloneableTags = {}; + cloneableTags[argsTag] = cloneableTags[arrayTag] = + cloneableTags[arrayBufferTag] = cloneableTags[dataViewTag] = + cloneableTags[boolTag] = cloneableTags[dateTag] = + cloneableTags[float32Tag] = cloneableTags[float64Tag] = + cloneableTags[int8Tag] = cloneableTags[int16Tag] = + cloneableTags[int32Tag] = cloneableTags[mapTag] = + cloneableTags[numberTag] = cloneableTags[objectTag] = + cloneableTags[regexpTag] = cloneableTags[setTag] = + cloneableTags[stringTag] = cloneableTags[symbolTag] = + cloneableTags[uint8Tag] = cloneableTags[uint8ClampedTag] = + cloneableTags[uint16Tag] = cloneableTags[uint32Tag] = true; + cloneableTags[errorTag] = cloneableTags[funcTag] = + cloneableTags[weakMapTag] = false; + + /** Used to map Latin Unicode letters to basic Latin letters. */ + var deburredLetters = { + // Latin-1 Supplement block. 
+ '\xc0': 'A', '\xc1': 'A', '\xc2': 'A', '\xc3': 'A', '\xc4': 'A', '\xc5': 'A', + '\xe0': 'a', '\xe1': 'a', '\xe2': 'a', '\xe3': 'a', '\xe4': 'a', '\xe5': 'a', + '\xc7': 'C', '\xe7': 'c', + '\xd0': 'D', '\xf0': 'd', + '\xc8': 'E', '\xc9': 'E', '\xca': 'E', '\xcb': 'E', + '\xe8': 'e', '\xe9': 'e', '\xea': 'e', '\xeb': 'e', + '\xcc': 'I', '\xcd': 'I', '\xce': 'I', '\xcf': 'I', + '\xec': 'i', '\xed': 'i', '\xee': 'i', '\xef': 'i', + '\xd1': 'N', '\xf1': 'n', + '\xd2': 'O', '\xd3': 'O', '\xd4': 'O', '\xd5': 'O', '\xd6': 'O', '\xd8': 'O', + '\xf2': 'o', '\xf3': 'o', '\xf4': 'o', '\xf5': 'o', '\xf6': 'o', '\xf8': 'o', + '\xd9': 'U', '\xda': 'U', '\xdb': 'U', '\xdc': 'U', + '\xf9': 'u', '\xfa': 'u', '\xfb': 'u', '\xfc': 'u', + '\xdd': 'Y', '\xfd': 'y', '\xff': 'y', + '\xc6': 'Ae', '\xe6': 'ae', + '\xde': 'Th', '\xfe': 'th', + '\xdf': 'ss', + // Latin Extended-A block. + '\u0100': 'A', '\u0102': 'A', '\u0104': 'A', + '\u0101': 'a', '\u0103': 'a', '\u0105': 'a', + '\u0106': 'C', '\u0108': 'C', '\u010a': 'C', '\u010c': 'C', + '\u0107': 'c', '\u0109': 'c', '\u010b': 'c', '\u010d': 'c', + '\u010e': 'D', '\u0110': 'D', '\u010f': 'd', '\u0111': 'd', + '\u0112': 'E', '\u0114': 'E', '\u0116': 'E', '\u0118': 'E', '\u011a': 'E', + '\u0113': 'e', '\u0115': 'e', '\u0117': 'e', '\u0119': 'e', '\u011b': 'e', + '\u011c': 'G', '\u011e': 'G', '\u0120': 'G', '\u0122': 'G', + '\u011d': 'g', '\u011f': 'g', '\u0121': 'g', '\u0123': 'g', + '\u0124': 'H', '\u0126': 'H', '\u0125': 'h', '\u0127': 'h', + '\u0128': 'I', '\u012a': 'I', '\u012c': 'I', '\u012e': 'I', '\u0130': 'I', + '\u0129': 'i', '\u012b': 'i', '\u012d': 'i', '\u012f': 'i', '\u0131': 'i', + '\u0134': 'J', '\u0135': 'j', + '\u0136': 'K', '\u0137': 'k', '\u0138': 'k', + '\u0139': 'L', '\u013b': 'L', '\u013d': 'L', '\u013f': 'L', '\u0141': 'L', + '\u013a': 'l', '\u013c': 'l', '\u013e': 'l', '\u0140': 'l', '\u0142': 'l', + '\u0143': 'N', '\u0145': 'N', '\u0147': 'N', '\u014a': 'N', + '\u0144': 'n', '\u0146': 'n', '\u0148': 'n', '\u014b': 
'n', + '\u014c': 'O', '\u014e': 'O', '\u0150': 'O', + '\u014d': 'o', '\u014f': 'o', '\u0151': 'o', + '\u0154': 'R', '\u0156': 'R', '\u0158': 'R', + '\u0155': 'r', '\u0157': 'r', '\u0159': 'r', + '\u015a': 'S', '\u015c': 'S', '\u015e': 'S', '\u0160': 'S', + '\u015b': 's', '\u015d': 's', '\u015f': 's', '\u0161': 's', + '\u0162': 'T', '\u0164': 'T', '\u0166': 'T', + '\u0163': 't', '\u0165': 't', '\u0167': 't', + '\u0168': 'U', '\u016a': 'U', '\u016c': 'U', '\u016e': 'U', '\u0170': 'U', '\u0172': 'U', + '\u0169': 'u', '\u016b': 'u', '\u016d': 'u', '\u016f': 'u', '\u0171': 'u', '\u0173': 'u', + '\u0174': 'W', '\u0175': 'w', + '\u0176': 'Y', '\u0177': 'y', '\u0178': 'Y', + '\u0179': 'Z', '\u017b': 'Z', '\u017d': 'Z', + '\u017a': 'z', '\u017c': 'z', '\u017e': 'z', + '\u0132': 'IJ', '\u0133': 'ij', + '\u0152': 'Oe', '\u0153': 'oe', + '\u0149': "'n", '\u017f': 's' + }; + + /** Used to map characters to HTML entities. */ + var htmlEscapes = { + '&': '&', + '<': '<', + '>': '>', + '"': '"', + "'": ''' + }; + + /** Used to map HTML entities to characters. */ + var htmlUnescapes = { + '&': '&', + '<': '<', + '>': '>', + '"': '"', + ''': "'" + }; + + /** Used to escape characters for inclusion in compiled string literals. */ + var stringEscapes = { + '\\': '\\', + "'": "'", + '\n': 'n', + '\r': 'r', + '\u2028': 'u2028', + '\u2029': 'u2029' + }; + + /** Built-in method references without a dependency on `root`. */ + var freeParseFloat = parseFloat, + freeParseInt = parseInt; + + /** Detect free variable `global` from Node.js. */ + var freeGlobal = typeof global == 'object' && global && global.Object === Object && global; + + /** Detect free variable `self`. */ + var freeSelf = typeof self == 'object' && self && self.Object === Object && self; + + /** Used as a reference to the global object. */ + var root = freeGlobal || freeSelf || Function('return this')(); + + /** Detect free variable `exports`. 
*/ + var freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports; + + /** Detect free variable `module`. */ + var freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module; + + /** Detect the popular CommonJS extension `module.exports`. */ + var moduleExports = freeModule && freeModule.exports === freeExports; + + /** Detect free variable `process` from Node.js. */ + var freeProcess = moduleExports && freeGlobal.process; + + /** Used to access faster Node.js helpers. */ + var nodeUtil = (function() { + try { + // Use `util.types` for Node.js 10+. + var types = freeModule && freeModule.require && freeModule.require('util').types; + + if (types) { + return types; + } + + // Legacy `process.binding('util')` for Node.js < 10. + return freeProcess && freeProcess.binding && freeProcess.binding('util'); + } catch (e) {} + }()); + + /* Node.js helper references. */ + var nodeIsArrayBuffer = nodeUtil && nodeUtil.isArrayBuffer, + nodeIsDate = nodeUtil && nodeUtil.isDate, + nodeIsMap = nodeUtil && nodeUtil.isMap, + nodeIsRegExp = nodeUtil && nodeUtil.isRegExp, + nodeIsSet = nodeUtil && nodeUtil.isSet, + nodeIsTypedArray = nodeUtil && nodeUtil.isTypedArray; + + /*--------------------------------------------------------------------------*/ + + /** + * A faster alternative to `Function#apply`, this function invokes `func` + * with the `this` binding of `thisArg` and the arguments of `args`. + * + * @private + * @param {Function} func The function to invoke. + * @param {*} thisArg The `this` binding of `func`. + * @param {Array} args The arguments to invoke `func` with. + * @returns {*} Returns the result of `func`. 
+ */ + function apply(func, thisArg, args) { + switch (args.length) { + case 0: return func.call(thisArg); + case 1: return func.call(thisArg, args[0]); + case 2: return func.call(thisArg, args[0], args[1]); + case 3: return func.call(thisArg, args[0], args[1], args[2]); + } + return func.apply(thisArg, args); + } + + /** + * A specialized version of `baseAggregator` for arrays. + * + * @private + * @param {Array} [array] The array to iterate over. + * @param {Function} setter The function to set `accumulator` values. + * @param {Function} iteratee The iteratee to transform keys. + * @param {Object} accumulator The initial aggregated object. + * @returns {Function} Returns `accumulator`. + */ + function arrayAggregator(array, setter, iteratee, accumulator) { + var index = -1, + length = array == null ? 0 : array.length; + + while (++index < length) { + var value = array[index]; + setter(accumulator, value, iteratee(value), array); + } + return accumulator; + } + + /** + * A specialized version of `_.forEach` for arrays without support for + * iteratee shorthands. + * + * @private + * @param {Array} [array] The array to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Array} Returns `array`. + */ + function arrayEach(array, iteratee) { + var index = -1, + length = array == null ? 0 : array.length; + + while (++index < length) { + if (iteratee(array[index], index, array) === false) { + break; + } + } + return array; + } + + /** + * A specialized version of `_.forEachRight` for arrays without support for + * iteratee shorthands. + * + * @private + * @param {Array} [array] The array to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Array} Returns `array`. + */ + function arrayEachRight(array, iteratee) { + var length = array == null ? 
0 : array.length; + + while (length--) { + if (iteratee(array[length], length, array) === false) { + break; + } + } + return array; + } + + /** + * A specialized version of `_.every` for arrays without support for + * iteratee shorthands. + * + * @private + * @param {Array} [array] The array to iterate over. + * @param {Function} predicate The function invoked per iteration. + * @returns {boolean} Returns `true` if all elements pass the predicate check, + * else `false`. + */ + function arrayEvery(array, predicate) { + var index = -1, + length = array == null ? 0 : array.length; + + while (++index < length) { + if (!predicate(array[index], index, array)) { + return false; + } + } + return true; + } + + /** + * A specialized version of `_.filter` for arrays without support for + * iteratee shorthands. + * + * @private + * @param {Array} [array] The array to iterate over. + * @param {Function} predicate The function invoked per iteration. + * @returns {Array} Returns the new filtered array. + */ + function arrayFilter(array, predicate) { + var index = -1, + length = array == null ? 0 : array.length, + resIndex = 0, + result = []; + + while (++index < length) { + var value = array[index]; + if (predicate(value, index, array)) { + result[resIndex++] = value; + } + } + return result; + } + + /** + * A specialized version of `_.includes` for arrays without support for + * specifying an index to search from. + * + * @private + * @param {Array} [array] The array to inspect. + * @param {*} target The value to search for. + * @returns {boolean} Returns `true` if `target` is found, else `false`. + */ + function arrayIncludes(array, value) { + var length = array == null ? 0 : array.length; + return !!length && baseIndexOf(array, value, 0) > -1; + } + + /** + * This function is like `arrayIncludes` except that it accepts a comparator. + * + * @private + * @param {Array} [array] The array to inspect. + * @param {*} target The value to search for. 
+ * @param {Function} comparator The comparator invoked per element. + * @returns {boolean} Returns `true` if `target` is found, else `false`. + */ + function arrayIncludesWith(array, value, comparator) { + var index = -1, + length = array == null ? 0 : array.length; + + while (++index < length) { + if (comparator(value, array[index])) { + return true; + } + } + return false; + } + + /** + * A specialized version of `_.map` for arrays without support for iteratee + * shorthands. + * + * @private + * @param {Array} [array] The array to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Array} Returns the new mapped array. + */ + function arrayMap(array, iteratee) { + var index = -1, + length = array == null ? 0 : array.length, + result = Array(length); + + while (++index < length) { + result[index] = iteratee(array[index], index, array); + } + return result; + } + + /** + * Appends the elements of `values` to `array`. + * + * @private + * @param {Array} array The array to modify. + * @param {Array} values The values to append. + * @returns {Array} Returns `array`. + */ + function arrayPush(array, values) { + var index = -1, + length = values.length, + offset = array.length; + + while (++index < length) { + array[offset + index] = values[index]; + } + return array; + } + + /** + * A specialized version of `_.reduce` for arrays without support for + * iteratee shorthands. + * + * @private + * @param {Array} [array] The array to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @param {*} [accumulator] The initial value. + * @param {boolean} [initAccum] Specify using the first element of `array` as + * the initial value. + * @returns {*} Returns the accumulated value. + */ + function arrayReduce(array, iteratee, accumulator, initAccum) { + var index = -1, + length = array == null ? 
0 : array.length; + + if (initAccum && length) { + accumulator = array[++index]; + } + while (++index < length) { + accumulator = iteratee(accumulator, array[index], index, array); + } + return accumulator; + } + + /** + * A specialized version of `_.reduceRight` for arrays without support for + * iteratee shorthands. + * + * @private + * @param {Array} [array] The array to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @param {*} [accumulator] The initial value. + * @param {boolean} [initAccum] Specify using the last element of `array` as + * the initial value. + * @returns {*} Returns the accumulated value. + */ + function arrayReduceRight(array, iteratee, accumulator, initAccum) { + var length = array == null ? 0 : array.length; + if (initAccum && length) { + accumulator = array[--length]; + } + while (length--) { + accumulator = iteratee(accumulator, array[length], length, array); + } + return accumulator; + } + + /** + * A specialized version of `_.some` for arrays without support for iteratee + * shorthands. + * + * @private + * @param {Array} [array] The array to iterate over. + * @param {Function} predicate The function invoked per iteration. + * @returns {boolean} Returns `true` if any element passes the predicate check, + * else `false`. + */ + function arraySome(array, predicate) { + var index = -1, + length = array == null ? 0 : array.length; + + while (++index < length) { + if (predicate(array[index], index, array)) { + return true; + } + } + return false; + } + + /** + * Gets the size of an ASCII `string`. + * + * @private + * @param {string} string The string inspect. + * @returns {number} Returns the string size. + */ + var asciiSize = baseProperty('length'); + + /** + * Converts an ASCII `string` to an array. + * + * @private + * @param {string} string The string to convert. + * @returns {Array} Returns the converted array. 
+ */ + function asciiToArray(string) { + return string.split(''); + } + + /** + * Splits an ASCII `string` into an array of its words. + * + * @private + * @param {string} The string to inspect. + * @returns {Array} Returns the words of `string`. + */ + function asciiWords(string) { + return string.match(reAsciiWord) || []; + } + + /** + * The base implementation of methods like `_.findKey` and `_.findLastKey`, + * without support for iteratee shorthands, which iterates over `collection` + * using `eachFunc`. + * + * @private + * @param {Array|Object} collection The collection to inspect. + * @param {Function} predicate The function invoked per iteration. + * @param {Function} eachFunc The function to iterate over `collection`. + * @returns {*} Returns the found element or its key, else `undefined`. + */ + function baseFindKey(collection, predicate, eachFunc) { + var result; + eachFunc(collection, function(value, key, collection) { + if (predicate(value, key, collection)) { + result = key; + return false; + } + }); + return result; + } + + /** + * The base implementation of `_.findIndex` and `_.findLastIndex` without + * support for iteratee shorthands. + * + * @private + * @param {Array} array The array to inspect. + * @param {Function} predicate The function invoked per iteration. + * @param {number} fromIndex The index to search from. + * @param {boolean} [fromRight] Specify iterating from right to left. + * @returns {number} Returns the index of the matched value, else `-1`. + */ + function baseFindIndex(array, predicate, fromIndex, fromRight) { + var length = array.length, + index = fromIndex + (fromRight ? 1 : -1); + + while ((fromRight ? index-- : ++index < length)) { + if (predicate(array[index], index, array)) { + return index; + } + } + return -1; + } + + /** + * The base implementation of `_.indexOf` without `fromIndex` bounds checks. + * + * @private + * @param {Array} array The array to inspect. + * @param {*} value The value to search for. 
+ * @param {number} fromIndex The index to search from. + * @returns {number} Returns the index of the matched value, else `-1`. + */ + function baseIndexOf(array, value, fromIndex) { + return value === value + ? strictIndexOf(array, value, fromIndex) + : baseFindIndex(array, baseIsNaN, fromIndex); + } + + /** + * This function is like `baseIndexOf` except that it accepts a comparator. + * + * @private + * @param {Array} array The array to inspect. + * @param {*} value The value to search for. + * @param {number} fromIndex The index to search from. + * @param {Function} comparator The comparator invoked per element. + * @returns {number} Returns the index of the matched value, else `-1`. + */ + function baseIndexOfWith(array, value, fromIndex, comparator) { + var index = fromIndex - 1, + length = array.length; + + while (++index < length) { + if (comparator(array[index], value)) { + return index; + } + } + return -1; + } + + /** + * The base implementation of `_.isNaN` without support for number objects. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is `NaN`, else `false`. + */ + function baseIsNaN(value) { + return value !== value; + } + + /** + * The base implementation of `_.mean` and `_.meanBy` without support for + * iteratee shorthands. + * + * @private + * @param {Array} array The array to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {number} Returns the mean. + */ + function baseMean(array, iteratee) { + var length = array == null ? 0 : array.length; + return length ? (baseSum(array, iteratee) / length) : NAN; + } + + /** + * The base implementation of `_.property` without support for deep paths. + * + * @private + * @param {string} key The key of the property to get. + * @returns {Function} Returns the new accessor function. + */ + function baseProperty(key) { + return function(object) { + return object == null ? 
undefined : object[key]; + }; + } + + /** + * The base implementation of `_.propertyOf` without support for deep paths. + * + * @private + * @param {Object} object The object to query. + * @returns {Function} Returns the new accessor function. + */ + function basePropertyOf(object) { + return function(key) { + return object == null ? undefined : object[key]; + }; + } + + /** + * The base implementation of `_.reduce` and `_.reduceRight`, without support + * for iteratee shorthands, which iterates over `collection` using `eachFunc`. + * + * @private + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @param {*} accumulator The initial value. + * @param {boolean} initAccum Specify using the first or last element of + * `collection` as the initial value. + * @param {Function} eachFunc The function to iterate over `collection`. + * @returns {*} Returns the accumulated value. + */ + function baseReduce(collection, iteratee, accumulator, initAccum, eachFunc) { + eachFunc(collection, function(value, index, collection) { + accumulator = initAccum + ? (initAccum = false, value) + : iteratee(accumulator, value, index, collection); + }); + return accumulator; + } + + /** + * The base implementation of `_.sortBy` which uses `comparer` to define the + * sort order of `array` and replaces criteria objects with their corresponding + * values. + * + * @private + * @param {Array} array The array to sort. + * @param {Function} comparer The function to define sort order. + * @returns {Array} Returns `array`. + */ + function baseSortBy(array, comparer) { + var length = array.length; + + array.sort(comparer); + while (length--) { + array[length] = array[length].value; + } + return array; + } + + /** + * The base implementation of `_.sum` and `_.sumBy` without support for + * iteratee shorthands. + * + * @private + * @param {Array} array The array to iterate over. 
+ * @param {Function} iteratee The function invoked per iteration. + * @returns {number} Returns the sum. + */ + function baseSum(array, iteratee) { + var result, + index = -1, + length = array.length; + + while (++index < length) { + var current = iteratee(array[index]); + if (current !== undefined) { + result = result === undefined ? current : (result + current); + } + } + return result; + } + + /** + * The base implementation of `_.times` without support for iteratee shorthands + * or max array length checks. + * + * @private + * @param {number} n The number of times to invoke `iteratee`. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Array} Returns the array of results. + */ + function baseTimes(n, iteratee) { + var index = -1, + result = Array(n); + + while (++index < n) { + result[index] = iteratee(index); + } + return result; + } + + /** + * The base implementation of `_.toPairs` and `_.toPairsIn` which creates an array + * of key-value pairs for `object` corresponding to the property names of `props`. + * + * @private + * @param {Object} object The object to query. + * @param {Array} props The property names to get values for. + * @returns {Object} Returns the key-value pairs. + */ + function baseToPairs(object, props) { + return arrayMap(props, function(key) { + return [key, object[key]]; + }); + } + + /** + * The base implementation of `_.unary` without support for storing metadata. + * + * @private + * @param {Function} func The function to cap arguments for. + * @returns {Function} Returns the new capped function. + */ + function baseUnary(func) { + return function(value) { + return func(value); + }; + } + + /** + * The base implementation of `_.values` and `_.valuesIn` which creates an + * array of `object` property values corresponding to the property names + * of `props`. + * + * @private + * @param {Object} object The object to query. + * @param {Array} props The property names to get values for. 
+ * @returns {Object} Returns the array of property values. + */ + function baseValues(object, props) { + return arrayMap(props, function(key) { + return object[key]; + }); + } + + /** + * Checks if a `cache` value for `key` exists. + * + * @private + * @param {Object} cache The cache to query. + * @param {string} key The key of the entry to check. + * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. + */ + function cacheHas(cache, key) { + return cache.has(key); + } + + /** + * Used by `_.trim` and `_.trimStart` to get the index of the first string symbol + * that is not found in the character symbols. + * + * @private + * @param {Array} strSymbols The string symbols to inspect. + * @param {Array} chrSymbols The character symbols to find. + * @returns {number} Returns the index of the first unmatched string symbol. + */ + function charsStartIndex(strSymbols, chrSymbols) { + var index = -1, + length = strSymbols.length; + + while (++index < length && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {} + return index; + } + + /** + * Used by `_.trim` and `_.trimEnd` to get the index of the last string symbol + * that is not found in the character symbols. + * + * @private + * @param {Array} strSymbols The string symbols to inspect. + * @param {Array} chrSymbols The character symbols to find. + * @returns {number} Returns the index of the last unmatched string symbol. + */ + function charsEndIndex(strSymbols, chrSymbols) { + var index = strSymbols.length; + + while (index-- && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {} + return index; + } + + /** + * Gets the number of `placeholder` occurrences in `array`. + * + * @private + * @param {Array} array The array to inspect. + * @param {*} placeholder The placeholder to search for. + * @returns {number} Returns the placeholder count. 
+ */ + function countHolders(array, placeholder) { + var length = array.length, + result = 0; + + while (length--) { + if (array[length] === placeholder) { + ++result; + } + } + return result; + } + + /** + * Used by `_.deburr` to convert Latin-1 Supplement and Latin Extended-A + * letters to basic Latin letters. + * + * @private + * @param {string} letter The matched letter to deburr. + * @returns {string} Returns the deburred letter. + */ + var deburrLetter = basePropertyOf(deburredLetters); + + /** + * Used by `_.escape` to convert characters to HTML entities. + * + * @private + * @param {string} chr The matched character to escape. + * @returns {string} Returns the escaped character. + */ + var escapeHtmlChar = basePropertyOf(htmlEscapes); + + /** + * Used by `_.template` to escape characters for inclusion in compiled string literals. + * + * @private + * @param {string} chr The matched character to escape. + * @returns {string} Returns the escaped character. + */ + function escapeStringChar(chr) { + return '\\' + stringEscapes[chr]; + } + + /** + * Gets the value at `key` of `object`. + * + * @private + * @param {Object} [object] The object to query. + * @param {string} key The key of the property to get. + * @returns {*} Returns the property value. + */ + function getValue(object, key) { + return object == null ? undefined : object[key]; + } + + /** + * Checks if `string` contains Unicode symbols. + * + * @private + * @param {string} string The string to inspect. + * @returns {boolean} Returns `true` if a symbol is found, else `false`. + */ + function hasUnicode(string) { + return reHasUnicode.test(string); + } + + /** + * Checks if `string` contains a word composed of Unicode symbols. + * + * @private + * @param {string} string The string to inspect. + * @returns {boolean} Returns `true` if a word is found, else `false`. + */ + function hasUnicodeWord(string) { + return reHasUnicodeWord.test(string); + } + + /** + * Converts `iterator` to an array. 
+ * + * @private + * @param {Object} iterator The iterator to convert. + * @returns {Array} Returns the converted array. + */ + function iteratorToArray(iterator) { + var data, + result = []; + + while (!(data = iterator.next()).done) { + result.push(data.value); + } + return result; + } + + /** + * Converts `map` to its key-value pairs. + * + * @private + * @param {Object} map The map to convert. + * @returns {Array} Returns the key-value pairs. + */ + function mapToArray(map) { + var index = -1, + result = Array(map.size); + + map.forEach(function(value, key) { + result[++index] = [key, value]; + }); + return result; + } + + /** + * Creates a unary function that invokes `func` with its argument transformed. + * + * @private + * @param {Function} func The function to wrap. + * @param {Function} transform The argument transform. + * @returns {Function} Returns the new function. + */ + function overArg(func, transform) { + return function(arg) { + return func(transform(arg)); + }; + } + + /** + * Replaces all `placeholder` elements in `array` with an internal placeholder + * and returns an array of their indexes. + * + * @private + * @param {Array} array The array to modify. + * @param {*} placeholder The placeholder to replace. + * @returns {Array} Returns the new array of placeholder indexes. + */ + function replaceHolders(array, placeholder) { + var index = -1, + length = array.length, + resIndex = 0, + result = []; + + while (++index < length) { + var value = array[index]; + if (value === placeholder || value === PLACEHOLDER) { + array[index] = PLACEHOLDER; + result[resIndex++] = index; + } + } + return result; + } + + /** + * Gets the value at `key`, unless `key` is "__proto__". + * + * @private + * @param {Object} object The object to query. + * @param {string} key The key of the property to get. + * @returns {*} Returns the property value. + */ + function safeGet(object, key) { + return key == '__proto__' + ? 
undefined + : object[key]; + } + + /** + * Converts `set` to an array of its values. + * + * @private + * @param {Object} set The set to convert. + * @returns {Array} Returns the values. + */ + function setToArray(set) { + var index = -1, + result = Array(set.size); + + set.forEach(function(value) { + result[++index] = value; + }); + return result; + } + + /** + * Converts `set` to its value-value pairs. + * + * @private + * @param {Object} set The set to convert. + * @returns {Array} Returns the value-value pairs. + */ + function setToPairs(set) { + var index = -1, + result = Array(set.size); + + set.forEach(function(value) { + result[++index] = [value, value]; + }); + return result; + } + + /** + * A specialized version of `_.indexOf` which performs strict equality + * comparisons of values, i.e. `===`. + * + * @private + * @param {Array} array The array to inspect. + * @param {*} value The value to search for. + * @param {number} fromIndex The index to search from. + * @returns {number} Returns the index of the matched value, else `-1`. + */ + function strictIndexOf(array, value, fromIndex) { + var index = fromIndex - 1, + length = array.length; + + while (++index < length) { + if (array[index] === value) { + return index; + } + } + return -1; + } + + /** + * A specialized version of `_.lastIndexOf` which performs strict equality + * comparisons of values, i.e. `===`. + * + * @private + * @param {Array} array The array to inspect. + * @param {*} value The value to search for. + * @param {number} fromIndex The index to search from. + * @returns {number} Returns the index of the matched value, else `-1`. + */ + function strictLastIndexOf(array, value, fromIndex) { + var index = fromIndex + 1; + while (index--) { + if (array[index] === value) { + return index; + } + } + return index; + } + + /** + * Gets the number of symbols in `string`. + * + * @private + * @param {string} string The string to inspect. + * @returns {number} Returns the string size. 
+ */ + function stringSize(string) { + return hasUnicode(string) + ? unicodeSize(string) + : asciiSize(string); + } + + /** + * Converts `string` to an array. + * + * @private + * @param {string} string The string to convert. + * @returns {Array} Returns the converted array. + */ + function stringToArray(string) { + return hasUnicode(string) + ? unicodeToArray(string) + : asciiToArray(string); + } + + /** + * Used by `_.unescape` to convert HTML entities to characters. + * + * @private + * @param {string} chr The matched character to unescape. + * @returns {string} Returns the unescaped character. + */ + var unescapeHtmlChar = basePropertyOf(htmlUnescapes); + + /** + * Gets the size of a Unicode `string`. + * + * @private + * @param {string} string The string inspect. + * @returns {number} Returns the string size. + */ + function unicodeSize(string) { + var result = reUnicode.lastIndex = 0; + while (reUnicode.test(string)) { + ++result; + } + return result; + } + + /** + * Converts a Unicode `string` to an array. + * + * @private + * @param {string} string The string to convert. + * @returns {Array} Returns the converted array. + */ + function unicodeToArray(string) { + return string.match(reUnicode) || []; + } + + /** + * Splits a Unicode `string` into an array of its words. + * + * @private + * @param {string} The string to inspect. + * @returns {Array} Returns the words of `string`. + */ + function unicodeWords(string) { + return string.match(reUnicodeWord) || []; + } + + /*--------------------------------------------------------------------------*/ + + /** + * Create a new pristine `lodash` function using the `context` object. + * + * @static + * @memberOf _ + * @since 1.1.0 + * @category Util + * @param {Object} [context=root] The context object. + * @returns {Function} Returns a new `lodash` function. 
+ * @example + * + * _.mixin({ 'foo': _.constant('foo') }); + * + * var lodash = _.runInContext(); + * lodash.mixin({ 'bar': lodash.constant('bar') }); + * + * _.isFunction(_.foo); + * // => true + * _.isFunction(_.bar); + * // => false + * + * lodash.isFunction(lodash.foo); + * // => false + * lodash.isFunction(lodash.bar); + * // => true + * + * // Create a suped-up `defer` in Node.js. + * var defer = _.runInContext({ 'setTimeout': setImmediate }).defer; + */ + var runInContext = (function runInContext(context) { + context = context == null ? root : _.defaults(root.Object(), context, _.pick(root, contextProps)); + + /** Built-in constructor references. */ + var Array = context.Array, + Date = context.Date, + Error = context.Error, + Function = context.Function, + Math = context.Math, + Object = context.Object, + RegExp = context.RegExp, + String = context.String, + TypeError = context.TypeError; + + /** Used for built-in method references. */ + var arrayProto = Array.prototype, + funcProto = Function.prototype, + objectProto = Object.prototype; + + /** Used to detect overreaching core-js shims. */ + var coreJsData = context['__core-js_shared__']; + + /** Used to resolve the decompiled source of functions. */ + var funcToString = funcProto.toString; + + /** Used to check objects for own properties. */ + var hasOwnProperty = objectProto.hasOwnProperty; + + /** Used to generate unique IDs. */ + var idCounter = 0; + + /** Used to detect methods masquerading as native. */ + var maskSrcKey = (function() { + var uid = /[^.]+$/.exec(coreJsData && coreJsData.keys && coreJsData.keys.IE_PROTO || ''); + return uid ? ('Symbol(src)_1.' + uid) : ''; + }()); + + /** + * Used to resolve the + * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring) + * of values. + */ + var nativeObjectToString = objectProto.toString; + + /** Used to infer the `Object` constructor. 
*/ + var objectCtorString = funcToString.call(Object); + + /** Used to restore the original `_` reference in `_.noConflict`. */ + var oldDash = root._; + + /** Used to detect if a method is native. */ + var reIsNative = RegExp('^' + + funcToString.call(hasOwnProperty).replace(reRegExpChar, '\\$&') + .replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g, '$1.*?') + '$' + ); + + /** Built-in value references. */ + var Buffer = moduleExports ? context.Buffer : undefined, + Symbol = context.Symbol, + Uint8Array = context.Uint8Array, + allocUnsafe = Buffer ? Buffer.allocUnsafe : undefined, + getPrototype = overArg(Object.getPrototypeOf, Object), + objectCreate = Object.create, + propertyIsEnumerable = objectProto.propertyIsEnumerable, + splice = arrayProto.splice, + spreadableSymbol = Symbol ? Symbol.isConcatSpreadable : undefined, + symIterator = Symbol ? Symbol.iterator : undefined, + symToStringTag = Symbol ? Symbol.toStringTag : undefined; + + var defineProperty = (function() { + try { + var func = getNative(Object, 'defineProperty'); + func({}, '', {}); + return func; + } catch (e) {} + }()); + + /** Mocked built-ins. */ + var ctxClearTimeout = context.clearTimeout !== root.clearTimeout && context.clearTimeout, + ctxNow = Date && Date.now !== root.Date.now && Date.now, + ctxSetTimeout = context.setTimeout !== root.setTimeout && context.setTimeout; + + /* Built-in method references for those with the same name as other `lodash` methods. */ + var nativeCeil = Math.ceil, + nativeFloor = Math.floor, + nativeGetSymbols = Object.getOwnPropertySymbols, + nativeIsBuffer = Buffer ? Buffer.isBuffer : undefined, + nativeIsFinite = context.isFinite, + nativeJoin = arrayProto.join, + nativeKeys = overArg(Object.keys, Object), + nativeMax = Math.max, + nativeMin = Math.min, + nativeNow = Date.now, + nativeParseInt = context.parseInt, + nativeRandom = Math.random, + nativeReverse = arrayProto.reverse; + + /* Built-in method references that are verified to be native. 
*/ + var DataView = getNative(context, 'DataView'), + Map = getNative(context, 'Map'), + Promise = getNative(context, 'Promise'), + Set = getNative(context, 'Set'), + WeakMap = getNative(context, 'WeakMap'), + nativeCreate = getNative(Object, 'create'); + + /** Used to store function metadata. */ + var metaMap = WeakMap && new WeakMap; + + /** Used to lookup unminified function names. */ + var realNames = {}; + + /** Used to detect maps, sets, and weakmaps. */ + var dataViewCtorString = toSource(DataView), + mapCtorString = toSource(Map), + promiseCtorString = toSource(Promise), + setCtorString = toSource(Set), + weakMapCtorString = toSource(WeakMap); + + /** Used to convert symbols to primitives and strings. */ + var symbolProto = Symbol ? Symbol.prototype : undefined, + symbolValueOf = symbolProto ? symbolProto.valueOf : undefined, + symbolToString = symbolProto ? symbolProto.toString : undefined; + + /*------------------------------------------------------------------------*/ + + /** + * Creates a `lodash` object which wraps `value` to enable implicit method + * chain sequences. Methods that operate on and return arrays, collections, + * and functions can be chained together. Methods that retrieve a single value + * or may return a primitive value will automatically end the chain sequence + * and return the unwrapped value. Otherwise, the value must be unwrapped + * with `_#value`. + * + * Explicit chain sequences, which must be unwrapped with `_#value`, may be + * enabled using `_.chain`. + * + * The execution of chained methods is lazy, that is, it's deferred until + * `_#value` is implicitly or explicitly called. + * + * Lazy evaluation allows several methods to support shortcut fusion. + * Shortcut fusion is an optimization to merge iteratee calls; this avoids + * the creation of intermediate arrays and can greatly reduce the number of + * iteratee executions. 
Sections of a chain sequence qualify for shortcut + * fusion if the section is applied to an array and iteratees accept only + * one argument. The heuristic for whether a section qualifies for shortcut + * fusion is subject to change. + * + * Chaining is supported in custom builds as long as the `_#value` method is + * directly or indirectly included in the build. + * + * In addition to lodash methods, wrappers have `Array` and `String` methods. + * + * The wrapper `Array` methods are: + * `concat`, `join`, `pop`, `push`, `shift`, `sort`, `splice`, and `unshift` + * + * The wrapper `String` methods are: + * `replace` and `split` + * + * The wrapper methods that support shortcut fusion are: + * `at`, `compact`, `drop`, `dropRight`, `dropWhile`, `filter`, `find`, + * `findLast`, `head`, `initial`, `last`, `map`, `reject`, `reverse`, `slice`, + * `tail`, `take`, `takeRight`, `takeRightWhile`, `takeWhile`, and `toArray` + * + * The chainable wrapper methods are: + * `after`, `ary`, `assign`, `assignIn`, `assignInWith`, `assignWith`, `at`, + * `before`, `bind`, `bindAll`, `bindKey`, `castArray`, `chain`, `chunk`, + * `commit`, `compact`, `concat`, `conforms`, `constant`, `countBy`, `create`, + * `curry`, `debounce`, `defaults`, `defaultsDeep`, `defer`, `delay`, + * `difference`, `differenceBy`, `differenceWith`, `drop`, `dropRight`, + * `dropRightWhile`, `dropWhile`, `extend`, `extendWith`, `fill`, `filter`, + * `flatMap`, `flatMapDeep`, `flatMapDepth`, `flatten`, `flattenDeep`, + * `flattenDepth`, `flip`, `flow`, `flowRight`, `fromPairs`, `functions`, + * `functionsIn`, `groupBy`, `initial`, `intersection`, `intersectionBy`, + * `intersectionWith`, `invert`, `invertBy`, `invokeMap`, `iteratee`, `keyBy`, + * `keys`, `keysIn`, `map`, `mapKeys`, `mapValues`, `matches`, `matchesProperty`, + * `memoize`, `merge`, `mergeWith`, `method`, `methodOf`, `mixin`, `negate`, + * `nthArg`, `omit`, `omitBy`, `once`, `orderBy`, `over`, `overArgs`, + * `overEvery`, `overSome`, 
`partial`, `partialRight`, `partition`, `pick`, + * `pickBy`, `plant`, `property`, `propertyOf`, `pull`, `pullAll`, `pullAllBy`, + * `pullAllWith`, `pullAt`, `push`, `range`, `rangeRight`, `rearg`, `reject`, + * `remove`, `rest`, `reverse`, `sampleSize`, `set`, `setWith`, `shuffle`, + * `slice`, `sort`, `sortBy`, `splice`, `spread`, `tail`, `take`, `takeRight`, + * `takeRightWhile`, `takeWhile`, `tap`, `throttle`, `thru`, `toArray`, + * `toPairs`, `toPairsIn`, `toPath`, `toPlainObject`, `transform`, `unary`, + * `union`, `unionBy`, `unionWith`, `uniq`, `uniqBy`, `uniqWith`, `unset`, + * `unshift`, `unzip`, `unzipWith`, `update`, `updateWith`, `values`, + * `valuesIn`, `without`, `wrap`, `xor`, `xorBy`, `xorWith`, `zip`, + * `zipObject`, `zipObjectDeep`, and `zipWith` + * + * The wrapper methods that are **not** chainable by default are: + * `add`, `attempt`, `camelCase`, `capitalize`, `ceil`, `clamp`, `clone`, + * `cloneDeep`, `cloneDeepWith`, `cloneWith`, `conformsTo`, `deburr`, + * `defaultTo`, `divide`, `each`, `eachRight`, `endsWith`, `eq`, `escape`, + * `escapeRegExp`, `every`, `find`, `findIndex`, `findKey`, `findLast`, + * `findLastIndex`, `findLastKey`, `first`, `floor`, `forEach`, `forEachRight`, + * `forIn`, `forInRight`, `forOwn`, `forOwnRight`, `get`, `gt`, `gte`, `has`, + * `hasIn`, `head`, `identity`, `includes`, `indexOf`, `inRange`, `invoke`, + * `isArguments`, `isArray`, `isArrayBuffer`, `isArrayLike`, `isArrayLikeObject`, + * `isBoolean`, `isBuffer`, `isDate`, `isElement`, `isEmpty`, `isEqual`, + * `isEqualWith`, `isError`, `isFinite`, `isFunction`, `isInteger`, `isLength`, + * `isMap`, `isMatch`, `isMatchWith`, `isNaN`, `isNative`, `isNil`, `isNull`, + * `isNumber`, `isObject`, `isObjectLike`, `isPlainObject`, `isRegExp`, + * `isSafeInteger`, `isSet`, `isString`, `isUndefined`, `isTypedArray`, + * `isWeakMap`, `isWeakSet`, `join`, `kebabCase`, `last`, `lastIndexOf`, + * `lowerCase`, `lowerFirst`, `lt`, `lte`, `max`, `maxBy`, `mean`, `meanBy`, + * 
`min`, `minBy`, `multiply`, `noConflict`, `noop`, `now`, `nth`, `pad`, + * `padEnd`, `padStart`, `parseInt`, `pop`, `random`, `reduce`, `reduceRight`, + * `repeat`, `result`, `round`, `runInContext`, `sample`, `shift`, `size`, + * `snakeCase`, `some`, `sortedIndex`, `sortedIndexBy`, `sortedLastIndex`, + * `sortedLastIndexBy`, `startCase`, `startsWith`, `stubArray`, `stubFalse`, + * `stubObject`, `stubString`, `stubTrue`, `subtract`, `sum`, `sumBy`, + * `template`, `times`, `toFinite`, `toInteger`, `toJSON`, `toLength`, + * `toLower`, `toNumber`, `toSafeInteger`, `toString`, `toUpper`, `trim`, + * `trimEnd`, `trimStart`, `truncate`, `unescape`, `uniqueId`, `upperCase`, + * `upperFirst`, `value`, and `words` + * + * @name _ + * @constructor + * @category Seq + * @param {*} value The value to wrap in a `lodash` instance. + * @returns {Object} Returns the new `lodash` wrapper instance. + * @example + * + * function square(n) { + * return n * n; + * } + * + * var wrapped = _([1, 2, 3]); + * + * // Returns an unwrapped value. + * wrapped.reduce(_.add); + * // => 6 + * + * // Returns a wrapped value. + * var squares = wrapped.map(square); + * + * _.isArray(squares); + * // => false + * + * _.isArray(squares.value()); + * // => true + */ + function lodash(value) { + if (isObjectLike(value) && !isArray(value) && !(value instanceof LazyWrapper)) { + if (value instanceof LodashWrapper) { + return value; + } + if (hasOwnProperty.call(value, '__wrapped__')) { + return wrapperClone(value); + } + } + return new LodashWrapper(value); + } + + /** + * The base implementation of `_.create` without support for assigning + * properties to the created object. + * + * @private + * @param {Object} proto The object to inherit from. + * @returns {Object} Returns the new object. 
+ */ + var baseCreate = (function() { + function object() {} + return function(proto) { + if (!isObject(proto)) { + return {}; + } + if (objectCreate) { + return objectCreate(proto); + } + object.prototype = proto; + var result = new object; + object.prototype = undefined; + return result; + }; + }()); + + /** + * The function whose prototype chain sequence wrappers inherit from. + * + * @private + */ + function baseLodash() { + // No operation performed. + } + + /** + * The base constructor for creating `lodash` wrapper objects. + * + * @private + * @param {*} value The value to wrap. + * @param {boolean} [chainAll] Enable explicit method chain sequences. + */ + function LodashWrapper(value, chainAll) { + this.__wrapped__ = value; + this.__actions__ = []; + this.__chain__ = !!chainAll; + this.__index__ = 0; + this.__values__ = undefined; + } + + /** + * By default, the template delimiters used by lodash are like those in + * embedded Ruby (ERB) as well as ES2015 template strings. Change the + * following template settings to use alternative delimiters. + * + * @static + * @memberOf _ + * @type {Object} + */ + lodash.templateSettings = { + + /** + * Used to detect `data` property values to be HTML-escaped. + * + * @memberOf _.templateSettings + * @type {RegExp} + */ + 'escape': reEscape, + + /** + * Used to detect code to be evaluated. + * + * @memberOf _.templateSettings + * @type {RegExp} + */ + 'evaluate': reEvaluate, + + /** + * Used to detect `data` property values to inject. + * + * @memberOf _.templateSettings + * @type {RegExp} + */ + 'interpolate': reInterpolate, + + /** + * Used to reference the data object in the template text. + * + * @memberOf _.templateSettings + * @type {string} + */ + 'variable': '', + + /** + * Used to import variables into the compiled template. + * + * @memberOf _.templateSettings + * @type {Object} + */ + 'imports': { + + /** + * A reference to the `lodash` function. 
+ * + * @memberOf _.templateSettings.imports + * @type {Function} + */ + '_': lodash + } + }; + + // Ensure wrappers are instances of `baseLodash`. + lodash.prototype = baseLodash.prototype; + lodash.prototype.constructor = lodash; + + LodashWrapper.prototype = baseCreate(baseLodash.prototype); + LodashWrapper.prototype.constructor = LodashWrapper; + + /*------------------------------------------------------------------------*/ + + /** + * Creates a lazy wrapper object which wraps `value` to enable lazy evaluation. + * + * @private + * @constructor + * @param {*} value The value to wrap. + */ + function LazyWrapper(value) { + this.__wrapped__ = value; + this.__actions__ = []; + this.__dir__ = 1; + this.__filtered__ = false; + this.__iteratees__ = []; + this.__takeCount__ = MAX_ARRAY_LENGTH; + this.__views__ = []; + } + + /** + * Creates a clone of the lazy wrapper object. + * + * @private + * @name clone + * @memberOf LazyWrapper + * @returns {Object} Returns the cloned `LazyWrapper` object. + */ + function lazyClone() { + var result = new LazyWrapper(this.__wrapped__); + result.__actions__ = copyArray(this.__actions__); + result.__dir__ = this.__dir__; + result.__filtered__ = this.__filtered__; + result.__iteratees__ = copyArray(this.__iteratees__); + result.__takeCount__ = this.__takeCount__; + result.__views__ = copyArray(this.__views__); + return result; + } + + /** + * Reverses the direction of lazy iteration. + * + * @private + * @name reverse + * @memberOf LazyWrapper + * @returns {Object} Returns the new reversed `LazyWrapper` object. + */ + function lazyReverse() { + if (this.__filtered__) { + var result = new LazyWrapper(this); + result.__dir__ = -1; + result.__filtered__ = true; + } else { + result = this.clone(); + result.__dir__ *= -1; + } + return result; + } + + /** + * Extracts the unwrapped value from its lazy wrapper. + * + * @private + * @name value + * @memberOf LazyWrapper + * @returns {*} Returns the unwrapped value. 
+ */ + function lazyValue() { + var array = this.__wrapped__.value(), + dir = this.__dir__, + isArr = isArray(array), + isRight = dir < 0, + arrLength = isArr ? array.length : 0, + view = getView(0, arrLength, this.__views__), + start = view.start, + end = view.end, + length = end - start, + index = isRight ? end : (start - 1), + iteratees = this.__iteratees__, + iterLength = iteratees.length, + resIndex = 0, + takeCount = nativeMin(length, this.__takeCount__); + + if (!isArr || (!isRight && arrLength == length && takeCount == length)) { + return baseWrapperValue(array, this.__actions__); + } + var result = []; + + outer: + while (length-- && resIndex < takeCount) { + index += dir; + + var iterIndex = -1, + value = array[index]; + + while (++iterIndex < iterLength) { + var data = iteratees[iterIndex], + iteratee = data.iteratee, + type = data.type, + computed = iteratee(value); + + if (type == LAZY_MAP_FLAG) { + value = computed; + } else if (!computed) { + if (type == LAZY_FILTER_FLAG) { + continue outer; + } else { + break outer; + } + } + } + result[resIndex++] = value; + } + return result; + } + + // Ensure `LazyWrapper` is an instance of `baseLodash`. + LazyWrapper.prototype = baseCreate(baseLodash.prototype); + LazyWrapper.prototype.constructor = LazyWrapper; + + /*------------------------------------------------------------------------*/ + + /** + * Creates a hash object. + * + * @private + * @constructor + * @param {Array} [entries] The key-value pairs to cache. + */ + function Hash(entries) { + var index = -1, + length = entries == null ? 0 : entries.length; + + this.clear(); + while (++index < length) { + var entry = entries[index]; + this.set(entry[0], entry[1]); + } + } + + /** + * Removes all key-value entries from the hash. + * + * @private + * @name clear + * @memberOf Hash + */ + function hashClear() { + this.__data__ = nativeCreate ? nativeCreate(null) : {}; + this.size = 0; + } + + /** + * Removes `key` and its value from the hash. 
+ * + * @private + * @name delete + * @memberOf Hash + * @param {Object} hash The hash to modify. + * @param {string} key The key of the value to remove. + * @returns {boolean} Returns `true` if the entry was removed, else `false`. + */ + function hashDelete(key) { + var result = this.has(key) && delete this.__data__[key]; + this.size -= result ? 1 : 0; + return result; + } + + /** + * Gets the hash value for `key`. + * + * @private + * @name get + * @memberOf Hash + * @param {string} key The key of the value to get. + * @returns {*} Returns the entry value. + */ + function hashGet(key) { + var data = this.__data__; + if (nativeCreate) { + var result = data[key]; + return result === HASH_UNDEFINED ? undefined : result; + } + return hasOwnProperty.call(data, key) ? data[key] : undefined; + } + + /** + * Checks if a hash value for `key` exists. + * + * @private + * @name has + * @memberOf Hash + * @param {string} key The key of the entry to check. + * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. + */ + function hashHas(key) { + var data = this.__data__; + return nativeCreate ? (data[key] !== undefined) : hasOwnProperty.call(data, key); + } + + /** + * Sets the hash `key` to `value`. + * + * @private + * @name set + * @memberOf Hash + * @param {string} key The key of the value to set. + * @param {*} value The value to set. + * @returns {Object} Returns the hash instance. + */ + function hashSet(key, value) { + var data = this.__data__; + this.size += this.has(key) ? 0 : 1; + data[key] = (nativeCreate && value === undefined) ? HASH_UNDEFINED : value; + return this; + } + + // Add methods to `Hash`. + Hash.prototype.clear = hashClear; + Hash.prototype['delete'] = hashDelete; + Hash.prototype.get = hashGet; + Hash.prototype.has = hashHas; + Hash.prototype.set = hashSet; + + /*------------------------------------------------------------------------*/ + + /** + * Creates an list cache object. 
+ * + * @private + * @constructor + * @param {Array} [entries] The key-value pairs to cache. + */ + function ListCache(entries) { + var index = -1, + length = entries == null ? 0 : entries.length; + + this.clear(); + while (++index < length) { + var entry = entries[index]; + this.set(entry[0], entry[1]); + } + } + + /** + * Removes all key-value entries from the list cache. + * + * @private + * @name clear + * @memberOf ListCache + */ + function listCacheClear() { + this.__data__ = []; + this.size = 0; + } + + /** + * Removes `key` and its value from the list cache. + * + * @private + * @name delete + * @memberOf ListCache + * @param {string} key The key of the value to remove. + * @returns {boolean} Returns `true` if the entry was removed, else `false`. + */ + function listCacheDelete(key) { + var data = this.__data__, + index = assocIndexOf(data, key); + + if (index < 0) { + return false; + } + var lastIndex = data.length - 1; + if (index == lastIndex) { + data.pop(); + } else { + splice.call(data, index, 1); + } + --this.size; + return true; + } + + /** + * Gets the list cache value for `key`. + * + * @private + * @name get + * @memberOf ListCache + * @param {string} key The key of the value to get. + * @returns {*} Returns the entry value. + */ + function listCacheGet(key) { + var data = this.__data__, + index = assocIndexOf(data, key); + + return index < 0 ? undefined : data[index][1]; + } + + /** + * Checks if a list cache value for `key` exists. + * + * @private + * @name has + * @memberOf ListCache + * @param {string} key The key of the entry to check. + * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. + */ + function listCacheHas(key) { + return assocIndexOf(this.__data__, key) > -1; + } + + /** + * Sets the list cache `key` to `value`. + * + * @private + * @name set + * @memberOf ListCache + * @param {string} key The key of the value to set. + * @param {*} value The value to set. 
+ * @returns {Object} Returns the list cache instance. + */ + function listCacheSet(key, value) { + var data = this.__data__, + index = assocIndexOf(data, key); + + if (index < 0) { + ++this.size; + data.push([key, value]); + } else { + data[index][1] = value; + } + return this; + } + + // Add methods to `ListCache`. + ListCache.prototype.clear = listCacheClear; + ListCache.prototype['delete'] = listCacheDelete; + ListCache.prototype.get = listCacheGet; + ListCache.prototype.has = listCacheHas; + ListCache.prototype.set = listCacheSet; + + /*------------------------------------------------------------------------*/ + + /** + * Creates a map cache object to store key-value pairs. + * + * @private + * @constructor + * @param {Array} [entries] The key-value pairs to cache. + */ + function MapCache(entries) { + var index = -1, + length = entries == null ? 0 : entries.length; + + this.clear(); + while (++index < length) { + var entry = entries[index]; + this.set(entry[0], entry[1]); + } + } + + /** + * Removes all key-value entries from the map. + * + * @private + * @name clear + * @memberOf MapCache + */ + function mapCacheClear() { + this.size = 0; + this.__data__ = { + 'hash': new Hash, + 'map': new (Map || ListCache), + 'string': new Hash + }; + } + + /** + * Removes `key` and its value from the map. + * + * @private + * @name delete + * @memberOf MapCache + * @param {string} key The key of the value to remove. + * @returns {boolean} Returns `true` if the entry was removed, else `false`. + */ + function mapCacheDelete(key) { + var result = getMapData(this, key)['delete'](key); + this.size -= result ? 1 : 0; + return result; + } + + /** + * Gets the map value for `key`. + * + * @private + * @name get + * @memberOf MapCache + * @param {string} key The key of the value to get. + * @returns {*} Returns the entry value. + */ + function mapCacheGet(key) { + return getMapData(this, key).get(key); + } + + /** + * Checks if a map value for `key` exists. 
+ * + * @private + * @name has + * @memberOf MapCache + * @param {string} key The key of the entry to check. + * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. + */ + function mapCacheHas(key) { + return getMapData(this, key).has(key); + } + + /** + * Sets the map `key` to `value`. + * + * @private + * @name set + * @memberOf MapCache + * @param {string} key The key of the value to set. + * @param {*} value The value to set. + * @returns {Object} Returns the map cache instance. + */ + function mapCacheSet(key, value) { + var data = getMapData(this, key), + size = data.size; + + data.set(key, value); + this.size += data.size == size ? 0 : 1; + return this; + } + + // Add methods to `MapCache`. + MapCache.prototype.clear = mapCacheClear; + MapCache.prototype['delete'] = mapCacheDelete; + MapCache.prototype.get = mapCacheGet; + MapCache.prototype.has = mapCacheHas; + MapCache.prototype.set = mapCacheSet; + + /*------------------------------------------------------------------------*/ + + /** + * + * Creates an array cache object to store unique values. + * + * @private + * @constructor + * @param {Array} [values] The values to cache. + */ + function SetCache(values) { + var index = -1, + length = values == null ? 0 : values.length; + + this.__data__ = new MapCache; + while (++index < length) { + this.add(values[index]); + } + } + + /** + * Adds `value` to the array cache. + * + * @private + * @name add + * @memberOf SetCache + * @alias push + * @param {*} value The value to cache. + * @returns {Object} Returns the cache instance. + */ + function setCacheAdd(value) { + this.__data__.set(value, HASH_UNDEFINED); + return this; + } + + /** + * Checks if `value` is in the array cache. + * + * @private + * @name has + * @memberOf SetCache + * @param {*} value The value to search for. + * @returns {number} Returns `true` if `value` is found, else `false`. 
+ */ + function setCacheHas(value) { + return this.__data__.has(value); + } + + // Add methods to `SetCache`. + SetCache.prototype.add = SetCache.prototype.push = setCacheAdd; + SetCache.prototype.has = setCacheHas; + + /*------------------------------------------------------------------------*/ + + /** + * Creates a stack cache object to store key-value pairs. + * + * @private + * @constructor + * @param {Array} [entries] The key-value pairs to cache. + */ + function Stack(entries) { + var data = this.__data__ = new ListCache(entries); + this.size = data.size; + } + + /** + * Removes all key-value entries from the stack. + * + * @private + * @name clear + * @memberOf Stack + */ + function stackClear() { + this.__data__ = new ListCache; + this.size = 0; + } + + /** + * Removes `key` and its value from the stack. + * + * @private + * @name delete + * @memberOf Stack + * @param {string} key The key of the value to remove. + * @returns {boolean} Returns `true` if the entry was removed, else `false`. + */ + function stackDelete(key) { + var data = this.__data__, + result = data['delete'](key); + + this.size = data.size; + return result; + } + + /** + * Gets the stack value for `key`. + * + * @private + * @name get + * @memberOf Stack + * @param {string} key The key of the value to get. + * @returns {*} Returns the entry value. + */ + function stackGet(key) { + return this.__data__.get(key); + } + + /** + * Checks if a stack value for `key` exists. + * + * @private + * @name has + * @memberOf Stack + * @param {string} key The key of the entry to check. + * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. + */ + function stackHas(key) { + return this.__data__.has(key); + } + + /** + * Sets the stack `key` to `value`. + * + * @private + * @name set + * @memberOf Stack + * @param {string} key The key of the value to set. + * @param {*} value The value to set. + * @returns {Object} Returns the stack cache instance. 
+ */ + function stackSet(key, value) { + var data = this.__data__; + if (data instanceof ListCache) { + var pairs = data.__data__; + if (!Map || (pairs.length < LARGE_ARRAY_SIZE - 1)) { + pairs.push([key, value]); + this.size = ++data.size; + return this; + } + data = this.__data__ = new MapCache(pairs); + } + data.set(key, value); + this.size = data.size; + return this; + } + + // Add methods to `Stack`. + Stack.prototype.clear = stackClear; + Stack.prototype['delete'] = stackDelete; + Stack.prototype.get = stackGet; + Stack.prototype.has = stackHas; + Stack.prototype.set = stackSet; + + /*------------------------------------------------------------------------*/ + + /** + * Creates an array of the enumerable property names of the array-like `value`. + * + * @private + * @param {*} value The value to query. + * @param {boolean} inherited Specify returning inherited property names. + * @returns {Array} Returns the array of property names. + */ + function arrayLikeKeys(value, inherited) { + var isArr = isArray(value), + isArg = !isArr && isArguments(value), + isBuff = !isArr && !isArg && isBuffer(value), + isType = !isArr && !isArg && !isBuff && isTypedArray(value), + skipIndexes = isArr || isArg || isBuff || isType, + result = skipIndexes ? baseTimes(value.length, String) : [], + length = result.length; + + for (var key in value) { + if ((inherited || hasOwnProperty.call(value, key)) && + !(skipIndexes && ( + // Safari 9 has enumerable `arguments.length` in strict mode. + key == 'length' || + // Node.js 0.10 has enumerable non-index properties on buffers. + (isBuff && (key == 'offset' || key == 'parent')) || + // PhantomJS 2 has enumerable non-index properties on typed arrays. + (isType && (key == 'buffer' || key == 'byteLength' || key == 'byteOffset')) || + // Skip index properties. + isIndex(key, length) + ))) { + result.push(key); + } + } + return result; + } + + /** + * A specialized version of `_.sample` for arrays. 
+ * + * @private + * @param {Array} array The array to sample. + * @returns {*} Returns the random element. + */ + function arraySample(array) { + var length = array.length; + return length ? array[baseRandom(0, length - 1)] : undefined; + } + + /** + * A specialized version of `_.sampleSize` for arrays. + * + * @private + * @param {Array} array The array to sample. + * @param {number} n The number of elements to sample. + * @returns {Array} Returns the random elements. + */ + function arraySampleSize(array, n) { + return shuffleSelf(copyArray(array), baseClamp(n, 0, array.length)); + } + + /** + * A specialized version of `_.shuffle` for arrays. + * + * @private + * @param {Array} array The array to shuffle. + * @returns {Array} Returns the new shuffled array. + */ + function arrayShuffle(array) { + return shuffleSelf(copyArray(array)); + } + + /** + * This function is like `assignValue` except that it doesn't assign + * `undefined` values. + * + * @private + * @param {Object} object The object to modify. + * @param {string} key The key of the property to assign. + * @param {*} value The value to assign. + */ + function assignMergeValue(object, key, value) { + if ((value !== undefined && !eq(object[key], value)) || + (value === undefined && !(key in object))) { + baseAssignValue(object, key, value); + } + } + + /** + * Assigns `value` to `key` of `object` if the existing value is not equivalent + * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * for equality comparisons. + * + * @private + * @param {Object} object The object to modify. + * @param {string} key The key of the property to assign. + * @param {*} value The value to assign. 
+ */ + function assignValue(object, key, value) { + var objValue = object[key]; + if (!(hasOwnProperty.call(object, key) && eq(objValue, value)) || + (value === undefined && !(key in object))) { + baseAssignValue(object, key, value); + } + } + + /** + * Gets the index at which the `key` is found in `array` of key-value pairs. + * + * @private + * @param {Array} array The array to inspect. + * @param {*} key The key to search for. + * @returns {number} Returns the index of the matched value, else `-1`. + */ + function assocIndexOf(array, key) { + var length = array.length; + while (length--) { + if (eq(array[length][0], key)) { + return length; + } + } + return -1; + } + + /** + * Aggregates elements of `collection` on `accumulator` with keys transformed + * by `iteratee` and values set by `setter`. + * + * @private + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} setter The function to set `accumulator` values. + * @param {Function} iteratee The iteratee to transform keys. + * @param {Object} accumulator The initial aggregated object. + * @returns {Function} Returns `accumulator`. + */ + function baseAggregator(collection, setter, iteratee, accumulator) { + baseEach(collection, function(value, key, collection) { + setter(accumulator, value, iteratee(value), collection); + }); + return accumulator; + } + + /** + * The base implementation of `_.assign` without support for multiple sources + * or `customizer` functions. + * + * @private + * @param {Object} object The destination object. + * @param {Object} source The source object. + * @returns {Object} Returns `object`. + */ + function baseAssign(object, source) { + return object && copyObject(source, keys(source), object); + } + + /** + * The base implementation of `_.assignIn` without support for multiple sources + * or `customizer` functions. + * + * @private + * @param {Object} object The destination object. + * @param {Object} source The source object. 
+ * @returns {Object} Returns `object`. + */ + function baseAssignIn(object, source) { + return object && copyObject(source, keysIn(source), object); + } + + /** + * The base implementation of `assignValue` and `assignMergeValue` without + * value checks. + * + * @private + * @param {Object} object The object to modify. + * @param {string} key The key of the property to assign. + * @param {*} value The value to assign. + */ + function baseAssignValue(object, key, value) { + if (key == '__proto__' && defineProperty) { + defineProperty(object, key, { + 'configurable': true, + 'enumerable': true, + 'value': value, + 'writable': true + }); + } else { + object[key] = value; + } + } + + /** + * The base implementation of `_.at` without support for individual paths. + * + * @private + * @param {Object} object The object to iterate over. + * @param {string[]} paths The property paths to pick. + * @returns {Array} Returns the picked elements. + */ + function baseAt(object, paths) { + var index = -1, + length = paths.length, + result = Array(length), + skip = object == null; + + while (++index < length) { + result[index] = skip ? undefined : get(object, paths[index]); + } + return result; + } + + /** + * The base implementation of `_.clamp` which doesn't coerce arguments. + * + * @private + * @param {number} number The number to clamp. + * @param {number} [lower] The lower bound. + * @param {number} upper The upper bound. + * @returns {number} Returns the clamped number. + */ + function baseClamp(number, lower, upper) { + if (number === number) { + if (upper !== undefined) { + number = number <= upper ? number : upper; + } + if (lower !== undefined) { + number = number >= lower ? number : lower; + } + } + return number; + } + + /** + * The base implementation of `_.clone` and `_.cloneDeep` which tracks + * traversed objects. + * + * @private + * @param {*} value The value to clone. + * @param {boolean} bitmask The bitmask flags. 
+ * 1 - Deep clone + * 2 - Flatten inherited properties + * 4 - Clone symbols + * @param {Function} [customizer] The function to customize cloning. + * @param {string} [key] The key of `value`. + * @param {Object} [object] The parent object of `value`. + * @param {Object} [stack] Tracks traversed objects and their clone counterparts. + * @returns {*} Returns the cloned value. + */ + function baseClone(value, bitmask, customizer, key, object, stack) { + var result, + isDeep = bitmask & CLONE_DEEP_FLAG, + isFlat = bitmask & CLONE_FLAT_FLAG, + isFull = bitmask & CLONE_SYMBOLS_FLAG; + + if (customizer) { + result = object ? customizer(value, key, object, stack) : customizer(value); + } + if (result !== undefined) { + return result; + } + if (!isObject(value)) { + return value; + } + var isArr = isArray(value); + if (isArr) { + result = initCloneArray(value); + if (!isDeep) { + return copyArray(value, result); + } + } else { + var tag = getTag(value), + isFunc = tag == funcTag || tag == genTag; + + if (isBuffer(value)) { + return cloneBuffer(value, isDeep); + } + if (tag == objectTag || tag == argsTag || (isFunc && !object)) { + result = (isFlat || isFunc) ? {} : initCloneObject(value); + if (!isDeep) { + return isFlat + ? copySymbolsIn(value, baseAssignIn(result, value)) + : copySymbols(value, baseAssign(result, value)); + } + } else { + if (!cloneableTags[tag]) { + return object ? value : {}; + } + result = initCloneByTag(value, tag, isDeep); + } + } + // Check for circular references and return its corresponding clone. 
+ stack || (stack = new Stack); + var stacked = stack.get(value); + if (stacked) { + return stacked; + } + stack.set(value, result); + + if (isSet(value)) { + value.forEach(function(subValue) { + result.add(baseClone(subValue, bitmask, customizer, subValue, value, stack)); + }); + + return result; + } + + if (isMap(value)) { + value.forEach(function(subValue, key) { + result.set(key, baseClone(subValue, bitmask, customizer, key, value, stack)); + }); + + return result; + } + + var keysFunc = isFull + ? (isFlat ? getAllKeysIn : getAllKeys) + : (isFlat ? keysIn : keys); + + var props = isArr ? undefined : keysFunc(value); + arrayEach(props || value, function(subValue, key) { + if (props) { + key = subValue; + subValue = value[key]; + } + // Recursively populate clone (susceptible to call stack limits). + assignValue(result, key, baseClone(subValue, bitmask, customizer, key, value, stack)); + }); + return result; + } + + /** + * The base implementation of `_.conforms` which doesn't clone `source`. + * + * @private + * @param {Object} source The object of property predicates to conform to. + * @returns {Function} Returns the new spec function. + */ + function baseConforms(source) { + var props = keys(source); + return function(object) { + return baseConformsTo(object, source, props); + }; + } + + /** + * The base implementation of `_.conformsTo` which accepts `props` to check. + * + * @private + * @param {Object} object The object to inspect. + * @param {Object} source The object of property predicates to conform to. + * @returns {boolean} Returns `true` if `object` conforms, else `false`. 
+ */ + function baseConformsTo(object, source, props) { + var length = props.length; + if (object == null) { + return !length; + } + object = Object(object); + while (length--) { + var key = props[length], + predicate = source[key], + value = object[key]; + + if ((value === undefined && !(key in object)) || !predicate(value)) { + return false; + } + } + return true; + } + + /** + * The base implementation of `_.delay` and `_.defer` which accepts `args` + * to provide to `func`. + * + * @private + * @param {Function} func The function to delay. + * @param {number} wait The number of milliseconds to delay invocation. + * @param {Array} args The arguments to provide to `func`. + * @returns {number|Object} Returns the timer id or timeout object. + */ + function baseDelay(func, wait, args) { + if (typeof func != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + return setTimeout(function() { func.apply(undefined, args); }, wait); + } + + /** + * The base implementation of methods like `_.difference` without support + * for excluding multiple arrays or iteratee shorthands. + * + * @private + * @param {Array} array The array to inspect. + * @param {Array} values The values to exclude. + * @param {Function} [iteratee] The iteratee invoked per element. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns the new array of filtered values. 
+ */ + function baseDifference(array, values, iteratee, comparator) { + var index = -1, + includes = arrayIncludes, + isCommon = true, + length = array.length, + result = [], + valuesLength = values.length; + + if (!length) { + return result; + } + if (iteratee) { + values = arrayMap(values, baseUnary(iteratee)); + } + if (comparator) { + includes = arrayIncludesWith; + isCommon = false; + } + else if (values.length >= LARGE_ARRAY_SIZE) { + includes = cacheHas; + isCommon = false; + values = new SetCache(values); + } + outer: + while (++index < length) { + var value = array[index], + computed = iteratee == null ? value : iteratee(value); + + value = (comparator || value !== 0) ? value : 0; + if (isCommon && computed === computed) { + var valuesIndex = valuesLength; + while (valuesIndex--) { + if (values[valuesIndex] === computed) { + continue outer; + } + } + result.push(value); + } + else if (!includes(values, computed, comparator)) { + result.push(value); + } + } + return result; + } + + /** + * The base implementation of `_.forEach` without support for iteratee shorthands. + * + * @private + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Array|Object} Returns `collection`. + */ + var baseEach = createBaseEach(baseForOwn); + + /** + * The base implementation of `_.forEachRight` without support for iteratee shorthands. + * + * @private + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Array|Object} Returns `collection`. + */ + var baseEachRight = createBaseEach(baseForOwnRight, true); + + /** + * The base implementation of `_.every` without support for iteratee shorthands. + * + * @private + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} predicate The function invoked per iteration. 
+ * @returns {boolean} Returns `true` if all elements pass the predicate check, + * else `false` + */ + function baseEvery(collection, predicate) { + var result = true; + baseEach(collection, function(value, index, collection) { + result = !!predicate(value, index, collection); + return result; + }); + return result; + } + + /** + * The base implementation of methods like `_.max` and `_.min` which accepts a + * `comparator` to determine the extremum value. + * + * @private + * @param {Array} array The array to iterate over. + * @param {Function} iteratee The iteratee invoked per iteration. + * @param {Function} comparator The comparator used to compare values. + * @returns {*} Returns the extremum value. + */ + function baseExtremum(array, iteratee, comparator) { + var index = -1, + length = array.length; + + while (++index < length) { + var value = array[index], + current = iteratee(value); + + if (current != null && (computed === undefined + ? (current === current && !isSymbol(current)) + : comparator(current, computed) + )) { + var computed = current, + result = value; + } + } + return result; + } + + /** + * The base implementation of `_.fill` without an iteratee call guard. + * + * @private + * @param {Array} array The array to fill. + * @param {*} value The value to fill `array` with. + * @param {number} [start=0] The start position. + * @param {number} [end=array.length] The end position. + * @returns {Array} Returns `array`. + */ + function baseFill(array, value, start, end) { + var length = array.length; + + start = toInteger(start); + if (start < 0) { + start = -start > length ? 0 : (length + start); + } + end = (end === undefined || end > length) ? length : toInteger(end); + if (end < 0) { + end += length; + } + end = start > end ? 0 : toLength(end); + while (start < end) { + array[start++] = value; + } + return array; + } + + /** + * The base implementation of `_.filter` without support for iteratee shorthands. 
+ * + * @private + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} predicate The function invoked per iteration. + * @returns {Array} Returns the new filtered array. + */ + function baseFilter(collection, predicate) { + var result = []; + baseEach(collection, function(value, index, collection) { + if (predicate(value, index, collection)) { + result.push(value); + } + }); + return result; + } + + /** + * The base implementation of `_.flatten` with support for restricting flattening. + * + * @private + * @param {Array} array The array to flatten. + * @param {number} depth The maximum recursion depth. + * @param {boolean} [predicate=isFlattenable] The function invoked per iteration. + * @param {boolean} [isStrict] Restrict to values that pass `predicate` checks. + * @param {Array} [result=[]] The initial result value. + * @returns {Array} Returns the new flattened array. + */ + function baseFlatten(array, depth, predicate, isStrict, result) { + var index = -1, + length = array.length; + + predicate || (predicate = isFlattenable); + result || (result = []); + + while (++index < length) { + var value = array[index]; + if (depth > 0 && predicate(value)) { + if (depth > 1) { + // Recursively flatten arrays (susceptible to call stack limits). + baseFlatten(value, depth - 1, predicate, isStrict, result); + } else { + arrayPush(result, value); + } + } else if (!isStrict) { + result[result.length] = value; + } + } + return result; + } + + /** + * The base implementation of `baseForOwn` which iterates over `object` + * properties returned by `keysFunc` and invokes `iteratee` for each property. + * Iteratee functions may exit iteration early by explicitly returning `false`. + * + * @private + * @param {Object} object The object to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @param {Function} keysFunc The function to get the keys of `object`. + * @returns {Object} Returns `object`. 
+ */ + var baseFor = createBaseFor(); + + /** + * This function is like `baseFor` except that it iterates over properties + * in the opposite order. + * + * @private + * @param {Object} object The object to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @param {Function} keysFunc The function to get the keys of `object`. + * @returns {Object} Returns `object`. + */ + var baseForRight = createBaseFor(true); + + /** + * The base implementation of `_.forOwn` without support for iteratee shorthands. + * + * @private + * @param {Object} object The object to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Object} Returns `object`. + */ + function baseForOwn(object, iteratee) { + return object && baseFor(object, iteratee, keys); + } + + /** + * The base implementation of `_.forOwnRight` without support for iteratee shorthands. + * + * @private + * @param {Object} object The object to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Object} Returns `object`. + */ + function baseForOwnRight(object, iteratee) { + return object && baseForRight(object, iteratee, keys); + } + + /** + * The base implementation of `_.functions` which creates an array of + * `object` function property names filtered from `props`. + * + * @private + * @param {Object} object The object to inspect. + * @param {Array} props The property names to filter. + * @returns {Array} Returns the function names. + */ + function baseFunctions(object, props) { + return arrayFilter(props, function(key) { + return isFunction(object[key]); + }); + } + + /** + * The base implementation of `_.get` without support for default values. + * + * @private + * @param {Object} object The object to query. + * @param {Array|string} path The path of the property to get. + * @returns {*} Returns the resolved value. 
+ */ + function baseGet(object, path) { + path = castPath(path, object); + + var index = 0, + length = path.length; + + while (object != null && index < length) { + object = object[toKey(path[index++])]; + } + return (index && index == length) ? object : undefined; + } + + /** + * The base implementation of `getAllKeys` and `getAllKeysIn` which uses + * `keysFunc` and `symbolsFunc` to get the enumerable property names and + * symbols of `object`. + * + * @private + * @param {Object} object The object to query. + * @param {Function} keysFunc The function to get the keys of `object`. + * @param {Function} symbolsFunc The function to get the symbols of `object`. + * @returns {Array} Returns the array of property names and symbols. + */ + function baseGetAllKeys(object, keysFunc, symbolsFunc) { + var result = keysFunc(object); + return isArray(object) ? result : arrayPush(result, symbolsFunc(object)); + } + + /** + * The base implementation of `getTag` without fallbacks for buggy environments. + * + * @private + * @param {*} value The value to query. + * @returns {string} Returns the `toStringTag`. + */ + function baseGetTag(value) { + if (value == null) { + return value === undefined ? undefinedTag : nullTag; + } + return (symToStringTag && symToStringTag in Object(value)) + ? getRawTag(value) + : objectToString(value); + } + + /** + * The base implementation of `_.gt` which doesn't coerce arguments. + * + * @private + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {boolean} Returns `true` if `value` is greater than `other`, + * else `false`. + */ + function baseGt(value, other) { + return value > other; + } + + /** + * The base implementation of `_.has` without support for deep paths. + * + * @private + * @param {Object} [object] The object to query. + * @param {Array|string} key The key to check. + * @returns {boolean} Returns `true` if `key` exists, else `false`. 
+ */ + function baseHas(object, key) { + return object != null && hasOwnProperty.call(object, key); + } + + /** + * The base implementation of `_.hasIn` without support for deep paths. + * + * @private + * @param {Object} [object] The object to query. + * @param {Array|string} key The key to check. + * @returns {boolean} Returns `true` if `key` exists, else `false`. + */ + function baseHasIn(object, key) { + return object != null && key in Object(object); + } + + /** + * The base implementation of `_.inRange` which doesn't coerce arguments. + * + * @private + * @param {number} number The number to check. + * @param {number} start The start of the range. + * @param {number} end The end of the range. + * @returns {boolean} Returns `true` if `number` is in the range, else `false`. + */ + function baseInRange(number, start, end) { + return number >= nativeMin(start, end) && number < nativeMax(start, end); + } + + /** + * The base implementation of methods like `_.intersection`, without support + * for iteratee shorthands, that accepts an array of arrays to inspect. + * + * @private + * @param {Array} arrays The arrays to inspect. + * @param {Function} [iteratee] The iteratee invoked per element. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns the new array of shared values. + */ + function baseIntersection(arrays, iteratee, comparator) { + var includes = comparator ? arrayIncludesWith : arrayIncludes, + length = arrays[0].length, + othLength = arrays.length, + othIndex = othLength, + caches = Array(othLength), + maxLength = Infinity, + result = []; + + while (othIndex--) { + var array = arrays[othIndex]; + if (othIndex && iteratee) { + array = arrayMap(array, baseUnary(iteratee)); + } + maxLength = nativeMin(array.length, maxLength); + caches[othIndex] = !comparator && (iteratee || (length >= 120 && array.length >= 120)) + ? 
new SetCache(othIndex && array) + : undefined; + } + array = arrays[0]; + + var index = -1, + seen = caches[0]; + + outer: + while (++index < length && result.length < maxLength) { + var value = array[index], + computed = iteratee ? iteratee(value) : value; + + value = (comparator || value !== 0) ? value : 0; + if (!(seen + ? cacheHas(seen, computed) + : includes(result, computed, comparator) + )) { + othIndex = othLength; + while (--othIndex) { + var cache = caches[othIndex]; + if (!(cache + ? cacheHas(cache, computed) + : includes(arrays[othIndex], computed, comparator)) + ) { + continue outer; + } + } + if (seen) { + seen.push(computed); + } + result.push(value); + } + } + return result; + } + + /** + * The base implementation of `_.invert` and `_.invertBy` which inverts + * `object` with values transformed by `iteratee` and set by `setter`. + * + * @private + * @param {Object} object The object to iterate over. + * @param {Function} setter The function to set `accumulator` values. + * @param {Function} iteratee The iteratee to transform values. + * @param {Object} accumulator The initial inverted object. + * @returns {Function} Returns `accumulator`. + */ + function baseInverter(object, setter, iteratee, accumulator) { + baseForOwn(object, function(value, key, object) { + setter(accumulator, iteratee(value), key, object); + }); + return accumulator; + } + + /** + * The base implementation of `_.invoke` without support for individual + * method arguments. + * + * @private + * @param {Object} object The object to query. + * @param {Array|string} path The path of the method to invoke. + * @param {Array} args The arguments to invoke the method with. + * @returns {*} Returns the result of the invoked method. + */ + function baseInvoke(object, path, args) { + path = castPath(path, object); + object = parent(object, path); + var func = object == null ? object : object[toKey(last(path))]; + return func == null ? 
undefined : apply(func, object, args); + } + + /** + * The base implementation of `_.isArguments`. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an `arguments` object, else `false`. + */ + function baseIsArguments(value) { + return isObjectLike(value) && baseGetTag(value) == argsTag; + } + + /** + * The base implementation of `_.isArrayBuffer` without Node.js optimizations. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an array buffer, else `false`. + */ + function baseIsArrayBuffer(value) { + return isObjectLike(value) && baseGetTag(value) == arrayBufferTag; + } + + /** + * The base implementation of `_.isDate` without Node.js optimizations. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a date object, else `false`. + */ + function baseIsDate(value) { + return isObjectLike(value) && baseGetTag(value) == dateTag; + } + + /** + * The base implementation of `_.isEqual` which supports partial comparisons + * and tracks traversed objects. + * + * @private + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @param {boolean} bitmask The bitmask flags. + * 1 - Unordered comparison + * 2 - Partial comparison + * @param {Function} [customizer] The function to customize comparisons. + * @param {Object} [stack] Tracks traversed `value` and `other` objects. + * @returns {boolean} Returns `true` if the values are equivalent, else `false`. 
+ */ + function baseIsEqual(value, other, bitmask, customizer, stack) { + if (value === other) { + return true; + } + if (value == null || other == null || (!isObjectLike(value) && !isObjectLike(other))) { + return value !== value && other !== other; + } + return baseIsEqualDeep(value, other, bitmask, customizer, baseIsEqual, stack); + } + + /** + * A specialized version of `baseIsEqual` for arrays and objects which performs + * deep comparisons and tracks traversed objects enabling objects with circular + * references to be compared. + * + * @private + * @param {Object} object The object to compare. + * @param {Object} other The other object to compare. + * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. + * @param {Function} customizer The function to customize comparisons. + * @param {Function} equalFunc The function to determine equivalents of values. + * @param {Object} [stack] Tracks traversed `object` and `other` objects. + * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. + */ + function baseIsEqualDeep(object, other, bitmask, customizer, equalFunc, stack) { + var objIsArr = isArray(object), + othIsArr = isArray(other), + objTag = objIsArr ? arrayTag : getTag(object), + othTag = othIsArr ? arrayTag : getTag(other); + + objTag = objTag == argsTag ? objectTag : objTag; + othTag = othTag == argsTag ? objectTag : othTag; + + var objIsObj = objTag == objectTag, + othIsObj = othTag == objectTag, + isSameTag = objTag == othTag; + + if (isSameTag && isBuffer(object)) { + if (!isBuffer(other)) { + return false; + } + objIsArr = true; + objIsObj = false; + } + if (isSameTag && !objIsObj) { + stack || (stack = new Stack); + return (objIsArr || isTypedArray(object)) + ? 
equalArrays(object, other, bitmask, customizer, equalFunc, stack) + : equalByTag(object, other, objTag, bitmask, customizer, equalFunc, stack); + } + if (!(bitmask & COMPARE_PARTIAL_FLAG)) { + var objIsWrapped = objIsObj && hasOwnProperty.call(object, '__wrapped__'), + othIsWrapped = othIsObj && hasOwnProperty.call(other, '__wrapped__'); + + if (objIsWrapped || othIsWrapped) { + var objUnwrapped = objIsWrapped ? object.value() : object, + othUnwrapped = othIsWrapped ? other.value() : other; + + stack || (stack = new Stack); + return equalFunc(objUnwrapped, othUnwrapped, bitmask, customizer, stack); + } + } + if (!isSameTag) { + return false; + } + stack || (stack = new Stack); + return equalObjects(object, other, bitmask, customizer, equalFunc, stack); + } + + /** + * The base implementation of `_.isMap` without Node.js optimizations. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a map, else `false`. + */ + function baseIsMap(value) { + return isObjectLike(value) && getTag(value) == mapTag; + } + + /** + * The base implementation of `_.isMatch` without support for iteratee shorthands. + * + * @private + * @param {Object} object The object to inspect. + * @param {Object} source The object of property values to match. + * @param {Array} matchData The property names, values, and compare flags to match. + * @param {Function} [customizer] The function to customize comparisons. + * @returns {boolean} Returns `true` if `object` is a match, else `false`. + */ + function baseIsMatch(object, source, matchData, customizer) { + var index = matchData.length, + length = index, + noCustomizer = !customizer; + + if (object == null) { + return !length; + } + object = Object(object); + while (index--) { + var data = matchData[index]; + if ((noCustomizer && data[2]) + ? 
data[1] !== object[data[0]] + : !(data[0] in object) + ) { + return false; + } + } + while (++index < length) { + data = matchData[index]; + var key = data[0], + objValue = object[key], + srcValue = data[1]; + + if (noCustomizer && data[2]) { + if (objValue === undefined && !(key in object)) { + return false; + } + } else { + var stack = new Stack; + if (customizer) { + var result = customizer(objValue, srcValue, key, object, source, stack); + } + if (!(result === undefined + ? baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG, customizer, stack) + : result + )) { + return false; + } + } + } + return true; + } + + /** + * The base implementation of `_.isNative` without bad shim checks. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a native function, + * else `false`. + */ + function baseIsNative(value) { + if (!isObject(value) || isMasked(value)) { + return false; + } + var pattern = isFunction(value) ? reIsNative : reIsHostCtor; + return pattern.test(toSource(value)); + } + + /** + * The base implementation of `_.isRegExp` without Node.js optimizations. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a regexp, else `false`. + */ + function baseIsRegExp(value) { + return isObjectLike(value) && baseGetTag(value) == regexpTag; + } + + /** + * The base implementation of `_.isSet` without Node.js optimizations. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a set, else `false`. + */ + function baseIsSet(value) { + return isObjectLike(value) && getTag(value) == setTag; + } + + /** + * The base implementation of `_.isTypedArray` without Node.js optimizations. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a typed array, else `false`. 
+ */ + function baseIsTypedArray(value) { + return isObjectLike(value) && + isLength(value.length) && !!typedArrayTags[baseGetTag(value)]; + } + + /** + * The base implementation of `_.iteratee`. + * + * @private + * @param {*} [value=_.identity] The value to convert to an iteratee. + * @returns {Function} Returns the iteratee. + */ + function baseIteratee(value) { + // Don't store the `typeof` result in a variable to avoid a JIT bug in Safari 9. + // See https://bugs.webkit.org/show_bug.cgi?id=156034 for more details. + if (typeof value == 'function') { + return value; + } + if (value == null) { + return identity; + } + if (typeof value == 'object') { + return isArray(value) + ? baseMatchesProperty(value[0], value[1]) + : baseMatches(value); + } + return property(value); + } + + /** + * The base implementation of `_.keys` which doesn't treat sparse arrays as dense. + * + * @private + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names. + */ + function baseKeys(object) { + if (!isPrototype(object)) { + return nativeKeys(object); + } + var result = []; + for (var key in Object(object)) { + if (hasOwnProperty.call(object, key) && key != 'constructor') { + result.push(key); + } + } + return result; + } + + /** + * The base implementation of `_.keysIn` which doesn't treat sparse arrays as dense. + * + * @private + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names. + */ + function baseKeysIn(object) { + if (!isObject(object)) { + return nativeKeysIn(object); + } + var isProto = isPrototype(object), + result = []; + + for (var key in object) { + if (!(key == 'constructor' && (isProto || !hasOwnProperty.call(object, key)))) { + result.push(key); + } + } + return result; + } + + /** + * The base implementation of `_.lt` which doesn't coerce arguments. + * + * @private + * @param {*} value The value to compare. + * @param {*} other The other value to compare. 
+ * @returns {boolean} Returns `true` if `value` is less than `other`, + * else `false`. + */ + function baseLt(value, other) { + return value < other; + } + + /** + * The base implementation of `_.map` without support for iteratee shorthands. + * + * @private + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Array} Returns the new mapped array. + */ + function baseMap(collection, iteratee) { + var index = -1, + result = isArrayLike(collection) ? Array(collection.length) : []; + + baseEach(collection, function(value, key, collection) { + result[++index] = iteratee(value, key, collection); + }); + return result; + } + + /** + * The base implementation of `_.matches` which doesn't clone `source`. + * + * @private + * @param {Object} source The object of property values to match. + * @returns {Function} Returns the new spec function. + */ + function baseMatches(source) { + var matchData = getMatchData(source); + if (matchData.length == 1 && matchData[0][2]) { + return matchesStrictComparable(matchData[0][0], matchData[0][1]); + } + return function(object) { + return object === source || baseIsMatch(object, source, matchData); + }; + } + + /** + * The base implementation of `_.matchesProperty` which doesn't clone `srcValue`. + * + * @private + * @param {string} path The path of the property to get. + * @param {*} srcValue The value to match. + * @returns {Function} Returns the new spec function. + */ + function baseMatchesProperty(path, srcValue) { + if (isKey(path) && isStrictComparable(srcValue)) { + return matchesStrictComparable(toKey(path), srcValue); + } + return function(object) { + var objValue = get(object, path); + return (objValue === undefined && objValue === srcValue) + ? 
hasIn(object, path) + : baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG); + }; + } + + /** + * The base implementation of `_.merge` without support for multiple sources. + * + * @private + * @param {Object} object The destination object. + * @param {Object} source The source object. + * @param {number} srcIndex The index of `source`. + * @param {Function} [customizer] The function to customize merged values. + * @param {Object} [stack] Tracks traversed source values and their merged + * counterparts. + */ + function baseMerge(object, source, srcIndex, customizer, stack) { + if (object === source) { + return; + } + baseFor(source, function(srcValue, key) { + if (isObject(srcValue)) { + stack || (stack = new Stack); + baseMergeDeep(object, source, key, srcIndex, baseMerge, customizer, stack); + } + else { + var newValue = customizer + ? customizer(safeGet(object, key), srcValue, (key + ''), object, source, stack) + : undefined; + + if (newValue === undefined) { + newValue = srcValue; + } + assignMergeValue(object, key, newValue); + } + }, keysIn); + } + + /** + * A specialized version of `baseMerge` for arrays and objects which performs + * deep merges and tracks traversed objects enabling objects with circular + * references to be merged. + * + * @private + * @param {Object} object The destination object. + * @param {Object} source The source object. + * @param {string} key The key of the value to merge. + * @param {number} srcIndex The index of `source`. + * @param {Function} mergeFunc The function to merge values. + * @param {Function} [customizer] The function to customize assigned values. + * @param {Object} [stack] Tracks traversed source values and their merged + * counterparts. 
+ */ + function baseMergeDeep(object, source, key, srcIndex, mergeFunc, customizer, stack) { + var objValue = safeGet(object, key), + srcValue = safeGet(source, key), + stacked = stack.get(srcValue); + + if (stacked) { + assignMergeValue(object, key, stacked); + return; + } + var newValue = customizer + ? customizer(objValue, srcValue, (key + ''), object, source, stack) + : undefined; + + var isCommon = newValue === undefined; + + if (isCommon) { + var isArr = isArray(srcValue), + isBuff = !isArr && isBuffer(srcValue), + isTyped = !isArr && !isBuff && isTypedArray(srcValue); + + newValue = srcValue; + if (isArr || isBuff || isTyped) { + if (isArray(objValue)) { + newValue = objValue; + } + else if (isArrayLikeObject(objValue)) { + newValue = copyArray(objValue); + } + else if (isBuff) { + isCommon = false; + newValue = cloneBuffer(srcValue, true); + } + else if (isTyped) { + isCommon = false; + newValue = cloneTypedArray(srcValue, true); + } + else { + newValue = []; + } + } + else if (isPlainObject(srcValue) || isArguments(srcValue)) { + newValue = objValue; + if (isArguments(objValue)) { + newValue = toPlainObject(objValue); + } + else if (!isObject(objValue) || (srcIndex && isFunction(objValue))) { + newValue = initCloneObject(srcValue); + } + } + else { + isCommon = false; + } + } + if (isCommon) { + // Recursively merge objects and arrays (susceptible to call stack limits). + stack.set(srcValue, newValue); + mergeFunc(newValue, srcValue, srcIndex, customizer, stack); + stack['delete'](srcValue); + } + assignMergeValue(object, key, newValue); + } + + /** + * The base implementation of `_.nth` which doesn't coerce arguments. + * + * @private + * @param {Array} array The array to query. + * @param {number} n The index of the element to return. + * @returns {*} Returns the nth element of `array`. + */ + function baseNth(array, n) { + var length = array.length; + if (!length) { + return; + } + n += n < 0 ? length : 0; + return isIndex(n, length) ? 
array[n] : undefined; + } + + /** + * The base implementation of `_.orderBy` without param guards. + * + * @private + * @param {Array|Object} collection The collection to iterate over. + * @param {Function[]|Object[]|string[]} iteratees The iteratees to sort by. + * @param {string[]} orders The sort orders of `iteratees`. + * @returns {Array} Returns the new sorted array. + */ + function baseOrderBy(collection, iteratees, orders) { + var index = -1; + iteratees = arrayMap(iteratees.length ? iteratees : [identity], baseUnary(getIteratee())); + + var result = baseMap(collection, function(value, key, collection) { + var criteria = arrayMap(iteratees, function(iteratee) { + return iteratee(value); + }); + return { 'criteria': criteria, 'index': ++index, 'value': value }; + }); + + return baseSortBy(result, function(object, other) { + return compareMultiple(object, other, orders); + }); + } + + /** + * The base implementation of `_.pick` without support for individual + * property identifiers. + * + * @private + * @param {Object} object The source object. + * @param {string[]} paths The property paths to pick. + * @returns {Object} Returns the new object. + */ + function basePick(object, paths) { + return basePickBy(object, paths, function(value, path) { + return hasIn(object, path); + }); + } + + /** + * The base implementation of `_.pickBy` without support for iteratee shorthands. + * + * @private + * @param {Object} object The source object. + * @param {string[]} paths The property paths to pick. + * @param {Function} predicate The function invoked per property. + * @returns {Object} Returns the new object. 
+ */ + function basePickBy(object, paths, predicate) { + var index = -1, + length = paths.length, + result = {}; + + while (++index < length) { + var path = paths[index], + value = baseGet(object, path); + + if (predicate(value, path)) { + baseSet(result, castPath(path, object), value); + } + } + return result; + } + + /** + * A specialized version of `baseProperty` which supports deep paths. + * + * @private + * @param {Array|string} path The path of the property to get. + * @returns {Function} Returns the new accessor function. + */ + function basePropertyDeep(path) { + return function(object) { + return baseGet(object, path); + }; + } + + /** + * The base implementation of `_.pullAllBy` without support for iteratee + * shorthands. + * + * @private + * @param {Array} array The array to modify. + * @param {Array} values The values to remove. + * @param {Function} [iteratee] The iteratee invoked per element. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns `array`. + */ + function basePullAll(array, values, iteratee, comparator) { + var indexOf = comparator ? baseIndexOfWith : baseIndexOf, + index = -1, + length = values.length, + seen = array; + + if (array === values) { + values = copyArray(values); + } + if (iteratee) { + seen = arrayMap(array, baseUnary(iteratee)); + } + while (++index < length) { + var fromIndex = 0, + value = values[index], + computed = iteratee ? iteratee(value) : value; + + while ((fromIndex = indexOf(seen, computed, fromIndex, comparator)) > -1) { + if (seen !== array) { + splice.call(seen, fromIndex, 1); + } + splice.call(array, fromIndex, 1); + } + } + return array; + } + + /** + * The base implementation of `_.pullAt` without support for individual + * indexes or capturing the removed elements. + * + * @private + * @param {Array} array The array to modify. + * @param {number[]} indexes The indexes of elements to remove. + * @returns {Array} Returns `array`. 
+ */ + function basePullAt(array, indexes) { + var length = array ? indexes.length : 0, + lastIndex = length - 1; + + while (length--) { + var index = indexes[length]; + if (length == lastIndex || index !== previous) { + var previous = index; + if (isIndex(index)) { + splice.call(array, index, 1); + } else { + baseUnset(array, index); + } + } + } + return array; + } + + /** + * The base implementation of `_.random` without support for returning + * floating-point numbers. + * + * @private + * @param {number} lower The lower bound. + * @param {number} upper The upper bound. + * @returns {number} Returns the random number. + */ + function baseRandom(lower, upper) { + return lower + nativeFloor(nativeRandom() * (upper - lower + 1)); + } + + /** + * The base implementation of `_.range` and `_.rangeRight` which doesn't + * coerce arguments. + * + * @private + * @param {number} start The start of the range. + * @param {number} end The end of the range. + * @param {number} step The value to increment or decrement by. + * @param {boolean} [fromRight] Specify iterating from right to left. + * @returns {Array} Returns the range of numbers. + */ + function baseRange(start, end, step, fromRight) { + var index = -1, + length = nativeMax(nativeCeil((end - start) / (step || 1)), 0), + result = Array(length); + + while (length--) { + result[fromRight ? length : ++index] = start; + start += step; + } + return result; + } + + /** + * The base implementation of `_.repeat` which doesn't coerce arguments. + * + * @private + * @param {string} string The string to repeat. + * @param {number} n The number of times to repeat the string. + * @returns {string} Returns the repeated string. + */ + function baseRepeat(string, n) { + var result = ''; + if (!string || n < 1 || n > MAX_SAFE_INTEGER) { + return result; + } + // Leverage the exponentiation by squaring algorithm for a faster repeat. + // See https://en.wikipedia.org/wiki/Exponentiation_by_squaring for more details. 
+ do { + if (n % 2) { + result += string; + } + n = nativeFloor(n / 2); + if (n) { + string += string; + } + } while (n); + + return result; + } + + /** + * The base implementation of `_.rest` which doesn't validate or coerce arguments. + * + * @private + * @param {Function} func The function to apply a rest parameter to. + * @param {number} [start=func.length-1] The start position of the rest parameter. + * @returns {Function} Returns the new function. + */ + function baseRest(func, start) { + return setToString(overRest(func, start, identity), func + ''); + } + + /** + * The base implementation of `_.sample`. + * + * @private + * @param {Array|Object} collection The collection to sample. + * @returns {*} Returns the random element. + */ + function baseSample(collection) { + return arraySample(values(collection)); + } + + /** + * The base implementation of `_.sampleSize` without param guards. + * + * @private + * @param {Array|Object} collection The collection to sample. + * @param {number} n The number of elements to sample. + * @returns {Array} Returns the random elements. + */ + function baseSampleSize(collection, n) { + var array = values(collection); + return shuffleSelf(array, baseClamp(n, 0, array.length)); + } + + /** + * The base implementation of `_.set`. + * + * @private + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {*} value The value to set. + * @param {Function} [customizer] The function to customize path creation. + * @returns {Object} Returns `object`. + */ + function baseSet(object, path, value, customizer) { + if (!isObject(object)) { + return object; + } + path = castPath(path, object); + + var index = -1, + length = path.length, + lastIndex = length - 1, + nested = object; + + while (nested != null && ++index < length) { + var key = toKey(path[index]), + newValue = value; + + if (index != lastIndex) { + var objValue = nested[key]; + newValue = customizer ? 
customizer(objValue, key, nested) : undefined; + if (newValue === undefined) { + newValue = isObject(objValue) + ? objValue + : (isIndex(path[index + 1]) ? [] : {}); + } + } + assignValue(nested, key, newValue); + nested = nested[key]; + } + return object; + } + + /** + * The base implementation of `setData` without support for hot loop shorting. + * + * @private + * @param {Function} func The function to associate metadata with. + * @param {*} data The metadata. + * @returns {Function} Returns `func`. + */ + var baseSetData = !metaMap ? identity : function(func, data) { + metaMap.set(func, data); + return func; + }; + + /** + * The base implementation of `setToString` without support for hot loop shorting. + * + * @private + * @param {Function} func The function to modify. + * @param {Function} string The `toString` result. + * @returns {Function} Returns `func`. + */ + var baseSetToString = !defineProperty ? identity : function(func, string) { + return defineProperty(func, 'toString', { + 'configurable': true, + 'enumerable': false, + 'value': constant(string), + 'writable': true + }); + }; + + /** + * The base implementation of `_.shuffle`. + * + * @private + * @param {Array|Object} collection The collection to shuffle. + * @returns {Array} Returns the new shuffled array. + */ + function baseShuffle(collection) { + return shuffleSelf(values(collection)); + } + + /** + * The base implementation of `_.slice` without an iteratee call guard. + * + * @private + * @param {Array} array The array to slice. + * @param {number} [start=0] The start position. + * @param {number} [end=array.length] The end position. + * @returns {Array} Returns the slice of `array`. + */ + function baseSlice(array, start, end) { + var index = -1, + length = array.length; + + if (start < 0) { + start = -start > length ? 0 : (length + start); + } + end = end > length ? length : end; + if (end < 0) { + end += length; + } + length = start > end ? 
0 : ((end - start) >>> 0); + start >>>= 0; + + var result = Array(length); + while (++index < length) { + result[index] = array[index + start]; + } + return result; + } + + /** + * The base implementation of `_.some` without support for iteratee shorthands. + * + * @private + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} predicate The function invoked per iteration. + * @returns {boolean} Returns `true` if any element passes the predicate check, + * else `false`. + */ + function baseSome(collection, predicate) { + var result; + + baseEach(collection, function(value, index, collection) { + result = predicate(value, index, collection); + return !result; + }); + return !!result; + } + + /** + * The base implementation of `_.sortedIndex` and `_.sortedLastIndex` which + * performs a binary search of `array` to determine the index at which `value` + * should be inserted into `array` in order to maintain its sort order. + * + * @private + * @param {Array} array The sorted array to inspect. + * @param {*} value The value to evaluate. + * @param {boolean} [retHighest] Specify returning the highest qualified index. + * @returns {number} Returns the index at which `value` should be inserted + * into `array`. + */ + function baseSortedIndex(array, value, retHighest) { + var low = 0, + high = array == null ? low : array.length; + + if (typeof value == 'number' && value === value && high <= HALF_MAX_ARRAY_LENGTH) { + while (low < high) { + var mid = (low + high) >>> 1, + computed = array[mid]; + + if (computed !== null && !isSymbol(computed) && + (retHighest ? (computed <= value) : (computed < value))) { + low = mid + 1; + } else { + high = mid; + } + } + return high; + } + return baseSortedIndexBy(array, value, identity, retHighest); + } + + /** + * The base implementation of `_.sortedIndexBy` and `_.sortedLastIndexBy` + * which invokes `iteratee` for `value` and each element of `array` to compute + * their sort ranking. 
The iteratee is invoked with one argument; (value). + * + * @private + * @param {Array} array The sorted array to inspect. + * @param {*} value The value to evaluate. + * @param {Function} iteratee The iteratee invoked per element. + * @param {boolean} [retHighest] Specify returning the highest qualified index. + * @returns {number} Returns the index at which `value` should be inserted + * into `array`. + */ + function baseSortedIndexBy(array, value, iteratee, retHighest) { + value = iteratee(value); + + var low = 0, + high = array == null ? 0 : array.length, + valIsNaN = value !== value, + valIsNull = value === null, + valIsSymbol = isSymbol(value), + valIsUndefined = value === undefined; + + while (low < high) { + var mid = nativeFloor((low + high) / 2), + computed = iteratee(array[mid]), + othIsDefined = computed !== undefined, + othIsNull = computed === null, + othIsReflexive = computed === computed, + othIsSymbol = isSymbol(computed); + + if (valIsNaN) { + var setLow = retHighest || othIsReflexive; + } else if (valIsUndefined) { + setLow = othIsReflexive && (retHighest || othIsDefined); + } else if (valIsNull) { + setLow = othIsReflexive && othIsDefined && (retHighest || !othIsNull); + } else if (valIsSymbol) { + setLow = othIsReflexive && othIsDefined && !othIsNull && (retHighest || !othIsSymbol); + } else if (othIsNull || othIsSymbol) { + setLow = false; + } else { + setLow = retHighest ? (computed <= value) : (computed < value); + } + if (setLow) { + low = mid + 1; + } else { + high = mid; + } + } + return nativeMin(high, MAX_ARRAY_INDEX); + } + + /** + * The base implementation of `_.sortedUniq` and `_.sortedUniqBy` without + * support for iteratee shorthands. + * + * @private + * @param {Array} array The array to inspect. + * @param {Function} [iteratee] The iteratee invoked per element. + * @returns {Array} Returns the new duplicate free array. 
+ */ + function baseSortedUniq(array, iteratee) { + var index = -1, + length = array.length, + resIndex = 0, + result = []; + + while (++index < length) { + var value = array[index], + computed = iteratee ? iteratee(value) : value; + + if (!index || !eq(computed, seen)) { + var seen = computed; + result[resIndex++] = value === 0 ? 0 : value; + } + } + return result; + } + + /** + * The base implementation of `_.toNumber` which doesn't ensure correct + * conversions of binary, hexadecimal, or octal string values. + * + * @private + * @param {*} value The value to process. + * @returns {number} Returns the number. + */ + function baseToNumber(value) { + if (typeof value == 'number') { + return value; + } + if (isSymbol(value)) { + return NAN; + } + return +value; + } + + /** + * The base implementation of `_.toString` which doesn't convert nullish + * values to empty strings. + * + * @private + * @param {*} value The value to process. + * @returns {string} Returns the string. + */ + function baseToString(value) { + // Exit early for strings to avoid a performance hit in some environments. + if (typeof value == 'string') { + return value; + } + if (isArray(value)) { + // Recursively convert values (susceptible to call stack limits). + return arrayMap(value, baseToString) + ''; + } + if (isSymbol(value)) { + return symbolToString ? symbolToString.call(value) : ''; + } + var result = (value + ''); + return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result; + } + + /** + * The base implementation of `_.uniqBy` without support for iteratee shorthands. + * + * @private + * @param {Array} array The array to inspect. + * @param {Function} [iteratee] The iteratee invoked per element. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns the new duplicate free array. 
+ */ + function baseUniq(array, iteratee, comparator) { + var index = -1, + includes = arrayIncludes, + length = array.length, + isCommon = true, + result = [], + seen = result; + + if (comparator) { + isCommon = false; + includes = arrayIncludesWith; + } + else if (length >= LARGE_ARRAY_SIZE) { + var set = iteratee ? null : createSet(array); + if (set) { + return setToArray(set); + } + isCommon = false; + includes = cacheHas; + seen = new SetCache; + } + else { + seen = iteratee ? [] : result; + } + outer: + while (++index < length) { + var value = array[index], + computed = iteratee ? iteratee(value) : value; + + value = (comparator || value !== 0) ? value : 0; + if (isCommon && computed === computed) { + var seenIndex = seen.length; + while (seenIndex--) { + if (seen[seenIndex] === computed) { + continue outer; + } + } + if (iteratee) { + seen.push(computed); + } + result.push(value); + } + else if (!includes(seen, computed, comparator)) { + if (seen !== result) { + seen.push(computed); + } + result.push(value); + } + } + return result; + } + + /** + * The base implementation of `_.unset`. + * + * @private + * @param {Object} object The object to modify. + * @param {Array|string} path The property path to unset. + * @returns {boolean} Returns `true` if the property is deleted, else `false`. + */ + function baseUnset(object, path) { + path = castPath(path, object); + object = parent(object, path); + return object == null || delete object[toKey(last(path))]; + } + + /** + * The base implementation of `_.update`. + * + * @private + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to update. + * @param {Function} updater The function to produce the updated value. + * @param {Function} [customizer] The function to customize path creation. + * @returns {Object} Returns `object`. 
+ */ + function baseUpdate(object, path, updater, customizer) { + return baseSet(object, path, updater(baseGet(object, path)), customizer); + } + + /** + * The base implementation of methods like `_.dropWhile` and `_.takeWhile` + * without support for iteratee shorthands. + * + * @private + * @param {Array} array The array to query. + * @param {Function} predicate The function invoked per iteration. + * @param {boolean} [isDrop] Specify dropping elements instead of taking them. + * @param {boolean} [fromRight] Specify iterating from right to left. + * @returns {Array} Returns the slice of `array`. + */ + function baseWhile(array, predicate, isDrop, fromRight) { + var length = array.length, + index = fromRight ? length : -1; + + while ((fromRight ? index-- : ++index < length) && + predicate(array[index], index, array)) {} + + return isDrop + ? baseSlice(array, (fromRight ? 0 : index), (fromRight ? index + 1 : length)) + : baseSlice(array, (fromRight ? index + 1 : 0), (fromRight ? length : index)); + } + + /** + * The base implementation of `wrapperValue` which returns the result of + * performing a sequence of actions on the unwrapped `value`, where each + * successive action is supplied the return value of the previous. + * + * @private + * @param {*} value The unwrapped value. + * @param {Array} actions Actions to perform to resolve the unwrapped value. + * @returns {*} Returns the resolved value. + */ + function baseWrapperValue(value, actions) { + var result = value; + if (result instanceof LazyWrapper) { + result = result.value(); + } + return arrayReduce(actions, function(result, action) { + return action.func.apply(action.thisArg, arrayPush([result], action.args)); + }, result); + } + + /** + * The base implementation of methods like `_.xor`, without support for + * iteratee shorthands, that accepts an array of arrays to inspect. + * + * @private + * @param {Array} arrays The arrays to inspect. 
+ * @param {Function} [iteratee] The iteratee invoked per element. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns the new array of values. + */ + function baseXor(arrays, iteratee, comparator) { + var length = arrays.length; + if (length < 2) { + return length ? baseUniq(arrays[0]) : []; + } + var index = -1, + result = Array(length); + + while (++index < length) { + var array = arrays[index], + othIndex = -1; + + while (++othIndex < length) { + if (othIndex != index) { + result[index] = baseDifference(result[index] || array, arrays[othIndex], iteratee, comparator); + } + } + } + return baseUniq(baseFlatten(result, 1), iteratee, comparator); + } + + /** + * This base implementation of `_.zipObject` which assigns values using `assignFunc`. + * + * @private + * @param {Array} props The property identifiers. + * @param {Array} values The property values. + * @param {Function} assignFunc The function to assign values. + * @returns {Object} Returns the new object. + */ + function baseZipObject(props, values, assignFunc) { + var index = -1, + length = props.length, + valsLength = values.length, + result = {}; + + while (++index < length) { + var value = index < valsLength ? values[index] : undefined; + assignFunc(result, props[index], value); + } + return result; + } + + /** + * Casts `value` to an empty array if it's not an array like object. + * + * @private + * @param {*} value The value to inspect. + * @returns {Array|Object} Returns the cast array-like object. + */ + function castArrayLikeObject(value) { + return isArrayLikeObject(value) ? value : []; + } + + /** + * Casts `value` to `identity` if it's not a function. + * + * @private + * @param {*} value The value to inspect. + * @returns {Function} Returns cast function. + */ + function castFunction(value) { + return typeof value == 'function' ? value : identity; + } + + /** + * Casts `value` to a path array if it's not one. 
+ * + * @private + * @param {*} value The value to inspect. + * @param {Object} [object] The object to query keys on. + * @returns {Array} Returns the cast property path array. + */ + function castPath(value, object) { + if (isArray(value)) { + return value; + } + return isKey(value, object) ? [value] : stringToPath(toString(value)); + } + + /** + * A `baseRest` alias which can be replaced with `identity` by module + * replacement plugins. + * + * @private + * @type {Function} + * @param {Function} func The function to apply a rest parameter to. + * @returns {Function} Returns the new function. + */ + var castRest = baseRest; + + /** + * Casts `array` to a slice if it's needed. + * + * @private + * @param {Array} array The array to inspect. + * @param {number} start The start position. + * @param {number} [end=array.length] The end position. + * @returns {Array} Returns the cast slice. + */ + function castSlice(array, start, end) { + var length = array.length; + end = end === undefined ? length : end; + return (!start && end >= length) ? array : baseSlice(array, start, end); + } + + /** + * A simple wrapper around the global [`clearTimeout`](https://mdn.io/clearTimeout). + * + * @private + * @param {number|Object} id The timer id or timeout object of the timer to clear. + */ + var clearTimeout = ctxClearTimeout || function(id) { + return root.clearTimeout(id); + }; + + /** + * Creates a clone of `buffer`. + * + * @private + * @param {Buffer} buffer The buffer to clone. + * @param {boolean} [isDeep] Specify a deep clone. + * @returns {Buffer} Returns the cloned buffer. + */ + function cloneBuffer(buffer, isDeep) { + if (isDeep) { + return buffer.slice(); + } + var length = buffer.length, + result = allocUnsafe ? allocUnsafe(length) : new buffer.constructor(length); + + buffer.copy(result); + return result; + } + + /** + * Creates a clone of `arrayBuffer`. + * + * @private + * @param {ArrayBuffer} arrayBuffer The array buffer to clone. 
+ * @returns {ArrayBuffer} Returns the cloned array buffer. + */ + function cloneArrayBuffer(arrayBuffer) { + var result = new arrayBuffer.constructor(arrayBuffer.byteLength); + new Uint8Array(result).set(new Uint8Array(arrayBuffer)); + return result; + } + + /** + * Creates a clone of `dataView`. + * + * @private + * @param {Object} dataView The data view to clone. + * @param {boolean} [isDeep] Specify a deep clone. + * @returns {Object} Returns the cloned data view. + */ + function cloneDataView(dataView, isDeep) { + var buffer = isDeep ? cloneArrayBuffer(dataView.buffer) : dataView.buffer; + return new dataView.constructor(buffer, dataView.byteOffset, dataView.byteLength); + } + + /** + * Creates a clone of `regexp`. + * + * @private + * @param {Object} regexp The regexp to clone. + * @returns {Object} Returns the cloned regexp. + */ + function cloneRegExp(regexp) { + var result = new regexp.constructor(regexp.source, reFlags.exec(regexp)); + result.lastIndex = regexp.lastIndex; + return result; + } + + /** + * Creates a clone of the `symbol` object. + * + * @private + * @param {Object} symbol The symbol object to clone. + * @returns {Object} Returns the cloned symbol object. + */ + function cloneSymbol(symbol) { + return symbolValueOf ? Object(symbolValueOf.call(symbol)) : {}; + } + + /** + * Creates a clone of `typedArray`. + * + * @private + * @param {Object} typedArray The typed array to clone. + * @param {boolean} [isDeep] Specify a deep clone. + * @returns {Object} Returns the cloned typed array. + */ + function cloneTypedArray(typedArray, isDeep) { + var buffer = isDeep ? cloneArrayBuffer(typedArray.buffer) : typedArray.buffer; + return new typedArray.constructor(buffer, typedArray.byteOffset, typedArray.length); + } + + /** + * Compares values to sort them in ascending order. + * + * @private + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {number} Returns the sort order indicator for `value`. 
+ */ + function compareAscending(value, other) { + if (value !== other) { + var valIsDefined = value !== undefined, + valIsNull = value === null, + valIsReflexive = value === value, + valIsSymbol = isSymbol(value); + + var othIsDefined = other !== undefined, + othIsNull = other === null, + othIsReflexive = other === other, + othIsSymbol = isSymbol(other); + + if ((!othIsNull && !othIsSymbol && !valIsSymbol && value > other) || + (valIsSymbol && othIsDefined && othIsReflexive && !othIsNull && !othIsSymbol) || + (valIsNull && othIsDefined && othIsReflexive) || + (!valIsDefined && othIsReflexive) || + !valIsReflexive) { + return 1; + } + if ((!valIsNull && !valIsSymbol && !othIsSymbol && value < other) || + (othIsSymbol && valIsDefined && valIsReflexive && !valIsNull && !valIsSymbol) || + (othIsNull && valIsDefined && valIsReflexive) || + (!othIsDefined && valIsReflexive) || + !othIsReflexive) { + return -1; + } + } + return 0; + } + + /** + * Used by `_.orderBy` to compare multiple properties of a value to another + * and stable sort them. + * + * If `orders` is unspecified, all values are sorted in ascending order. Otherwise, + * specify an order of "desc" for descending or "asc" for ascending sort order + * of corresponding values. + * + * @private + * @param {Object} object The object to compare. + * @param {Object} other The other object to compare. + * @param {boolean[]|string[]} orders The order to sort by for each property. + * @returns {number} Returns the sort order indicator for `object`. + */ + function compareMultiple(object, other, orders) { + var index = -1, + objCriteria = object.criteria, + othCriteria = other.criteria, + length = objCriteria.length, + ordersLength = orders.length; + + while (++index < length) { + var result = compareAscending(objCriteria[index], othCriteria[index]); + if (result) { + if (index >= ordersLength) { + return result; + } + var order = orders[index]; + return result * (order == 'desc' ? 
-1 : 1); + } + } + // Fixes an `Array#sort` bug in the JS engine embedded in Adobe applications + // that causes it, under certain circumstances, to provide the same value for + // `object` and `other`. See https://github.com/jashkenas/underscore/pull/1247 + // for more details. + // + // This also ensures a stable sort in V8 and other engines. + // See https://bugs.chromium.org/p/v8/issues/detail?id=90 for more details. + return object.index - other.index; + } + + /** + * Creates an array that is the composition of partially applied arguments, + * placeholders, and provided arguments into a single array of arguments. + * + * @private + * @param {Array} args The provided arguments. + * @param {Array} partials The arguments to prepend to those provided. + * @param {Array} holders The `partials` placeholder indexes. + * @params {boolean} [isCurried] Specify composing for a curried function. + * @returns {Array} Returns the new array of composed arguments. + */ + function composeArgs(args, partials, holders, isCurried) { + var argsIndex = -1, + argsLength = args.length, + holdersLength = holders.length, + leftIndex = -1, + leftLength = partials.length, + rangeLength = nativeMax(argsLength - holdersLength, 0), + result = Array(leftLength + rangeLength), + isUncurried = !isCurried; + + while (++leftIndex < leftLength) { + result[leftIndex] = partials[leftIndex]; + } + while (++argsIndex < holdersLength) { + if (isUncurried || argsIndex < argsLength) { + result[holders[argsIndex]] = args[argsIndex]; + } + } + while (rangeLength--) { + result[leftIndex++] = args[argsIndex++]; + } + return result; + } + + /** + * This function is like `composeArgs` except that the arguments composition + * is tailored for `_.partialRight`. + * + * @private + * @param {Array} args The provided arguments. + * @param {Array} partials The arguments to append to those provided. + * @param {Array} holders The `partials` placeholder indexes. 
+ * @params {boolean} [isCurried] Specify composing for a curried function. + * @returns {Array} Returns the new array of composed arguments. + */ + function composeArgsRight(args, partials, holders, isCurried) { + var argsIndex = -1, + argsLength = args.length, + holdersIndex = -1, + holdersLength = holders.length, + rightIndex = -1, + rightLength = partials.length, + rangeLength = nativeMax(argsLength - holdersLength, 0), + result = Array(rangeLength + rightLength), + isUncurried = !isCurried; + + while (++argsIndex < rangeLength) { + result[argsIndex] = args[argsIndex]; + } + var offset = argsIndex; + while (++rightIndex < rightLength) { + result[offset + rightIndex] = partials[rightIndex]; + } + while (++holdersIndex < holdersLength) { + if (isUncurried || argsIndex < argsLength) { + result[offset + holders[holdersIndex]] = args[argsIndex++]; + } + } + return result; + } + + /** + * Copies the values of `source` to `array`. + * + * @private + * @param {Array} source The array to copy values from. + * @param {Array} [array=[]] The array to copy values to. + * @returns {Array} Returns `array`. + */ + function copyArray(source, array) { + var index = -1, + length = source.length; + + array || (array = Array(length)); + while (++index < length) { + array[index] = source[index]; + } + return array; + } + + /** + * Copies properties of `source` to `object`. + * + * @private + * @param {Object} source The object to copy properties from. + * @param {Array} props The property identifiers to copy. + * @param {Object} [object={}] The object to copy properties to. + * @param {Function} [customizer] The function to customize copied values. + * @returns {Object} Returns `object`. + */ + function copyObject(source, props, object, customizer) { + var isNew = !object; + object || (object = {}); + + var index = -1, + length = props.length; + + while (++index < length) { + var key = props[index]; + + var newValue = customizer + ? 
customizer(object[key], source[key], key, object, source) + : undefined; + + if (newValue === undefined) { + newValue = source[key]; + } + if (isNew) { + baseAssignValue(object, key, newValue); + } else { + assignValue(object, key, newValue); + } + } + return object; + } + + /** + * Copies own symbols of `source` to `object`. + * + * @private + * @param {Object} source The object to copy symbols from. + * @param {Object} [object={}] The object to copy symbols to. + * @returns {Object} Returns `object`. + */ + function copySymbols(source, object) { + return copyObject(source, getSymbols(source), object); + } + + /** + * Copies own and inherited symbols of `source` to `object`. + * + * @private + * @param {Object} source The object to copy symbols from. + * @param {Object} [object={}] The object to copy symbols to. + * @returns {Object} Returns `object`. + */ + function copySymbolsIn(source, object) { + return copyObject(source, getSymbolsIn(source), object); + } + + /** + * Creates a function like `_.groupBy`. + * + * @private + * @param {Function} setter The function to set accumulator values. + * @param {Function} [initializer] The accumulator object initializer. + * @returns {Function} Returns the new aggregator function. + */ + function createAggregator(setter, initializer) { + return function(collection, iteratee) { + var func = isArray(collection) ? arrayAggregator : baseAggregator, + accumulator = initializer ? initializer() : {}; + + return func(collection, setter, getIteratee(iteratee, 2), accumulator); + }; + } + + /** + * Creates a function like `_.assign`. + * + * @private + * @param {Function} assigner The function to assign values. + * @returns {Function} Returns the new assigner function. + */ + function createAssigner(assigner) { + return baseRest(function(object, sources) { + var index = -1, + length = sources.length, + customizer = length > 1 ? sources[length - 1] : undefined, + guard = length > 2 ? 
sources[2] : undefined; + + customizer = (assigner.length > 3 && typeof customizer == 'function') + ? (length--, customizer) + : undefined; + + if (guard && isIterateeCall(sources[0], sources[1], guard)) { + customizer = length < 3 ? undefined : customizer; + length = 1; + } + object = Object(object); + while (++index < length) { + var source = sources[index]; + if (source) { + assigner(object, source, index, customizer); + } + } + return object; + }); + } + + /** + * Creates a `baseEach` or `baseEachRight` function. + * + * @private + * @param {Function} eachFunc The function to iterate over a collection. + * @param {boolean} [fromRight] Specify iterating from right to left. + * @returns {Function} Returns the new base function. + */ + function createBaseEach(eachFunc, fromRight) { + return function(collection, iteratee) { + if (collection == null) { + return collection; + } + if (!isArrayLike(collection)) { + return eachFunc(collection, iteratee); + } + var length = collection.length, + index = fromRight ? length : -1, + iterable = Object(collection); + + while ((fromRight ? index-- : ++index < length)) { + if (iteratee(iterable[index], index, iterable) === false) { + break; + } + } + return collection; + }; + } + + /** + * Creates a base function for methods like `_.forIn` and `_.forOwn`. + * + * @private + * @param {boolean} [fromRight] Specify iterating from right to left. + * @returns {Function} Returns the new base function. + */ + function createBaseFor(fromRight) { + return function(object, iteratee, keysFunc) { + var index = -1, + iterable = Object(object), + props = keysFunc(object), + length = props.length; + + while (length--) { + var key = props[fromRight ? length : ++index]; + if (iteratee(iterable[key], key, iterable) === false) { + break; + } + } + return object; + }; + } + + /** + * Creates a function that wraps `func` to invoke it with the optional `this` + * binding of `thisArg`. + * + * @private + * @param {Function} func The function to wrap. 
+ * @param {number} bitmask The bitmask flags. See `createWrap` for more details. + * @param {*} [thisArg] The `this` binding of `func`. + * @returns {Function} Returns the new wrapped function. + */ + function createBind(func, bitmask, thisArg) { + var isBind = bitmask & WRAP_BIND_FLAG, + Ctor = createCtor(func); + + function wrapper() { + var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; + return fn.apply(isBind ? thisArg : this, arguments); + } + return wrapper; + } + + /** + * Creates a function like `_.lowerFirst`. + * + * @private + * @param {string} methodName The name of the `String` case method to use. + * @returns {Function} Returns the new case function. + */ + function createCaseFirst(methodName) { + return function(string) { + string = toString(string); + + var strSymbols = hasUnicode(string) + ? stringToArray(string) + : undefined; + + var chr = strSymbols + ? strSymbols[0] + : string.charAt(0); + + var trailing = strSymbols + ? castSlice(strSymbols, 1).join('') + : string.slice(1); + + return chr[methodName]() + trailing; + }; + } + + /** + * Creates a function like `_.camelCase`. + * + * @private + * @param {Function} callback The function to combine each word. + * @returns {Function} Returns the new compounder function. + */ + function createCompounder(callback) { + return function(string) { + return arrayReduce(words(deburr(string).replace(reApos, '')), callback, ''); + }; + } + + /** + * Creates a function that produces an instance of `Ctor` regardless of + * whether it was invoked as part of a `new` expression or by `call` or `apply`. + * + * @private + * @param {Function} Ctor The constructor to wrap. + * @returns {Function} Returns the new wrapped function. + */ + function createCtor(Ctor) { + return function() { + // Use a `switch` statement to work with class constructors. See + // http://ecma-international.org/ecma-262/7.0/#sec-ecmascript-function-objects-call-thisargument-argumentslist + // for more details. 
+ var args = arguments; + switch (args.length) { + case 0: return new Ctor; + case 1: return new Ctor(args[0]); + case 2: return new Ctor(args[0], args[1]); + case 3: return new Ctor(args[0], args[1], args[2]); + case 4: return new Ctor(args[0], args[1], args[2], args[3]); + case 5: return new Ctor(args[0], args[1], args[2], args[3], args[4]); + case 6: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5]); + case 7: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5], args[6]); + } + var thisBinding = baseCreate(Ctor.prototype), + result = Ctor.apply(thisBinding, args); + + // Mimic the constructor's `return` behavior. + // See https://es5.github.io/#x13.2.2 for more details. + return isObject(result) ? result : thisBinding; + }; + } + + /** + * Creates a function that wraps `func` to enable currying. + * + * @private + * @param {Function} func The function to wrap. + * @param {number} bitmask The bitmask flags. See `createWrap` for more details. + * @param {number} arity The arity of `func`. + * @returns {Function} Returns the new wrapped function. + */ + function createCurry(func, bitmask, arity) { + var Ctor = createCtor(func); + + function wrapper() { + var length = arguments.length, + args = Array(length), + index = length, + placeholder = getHolder(wrapper); + + while (index--) { + args[index] = arguments[index]; + } + var holders = (length < 3 && args[0] !== placeholder && args[length - 1] !== placeholder) + ? [] + : replaceHolders(args, placeholder); + + length -= holders.length; + if (length < arity) { + return createRecurry( + func, bitmask, createHybrid, wrapper.placeholder, undefined, + args, holders, undefined, undefined, arity - length); + } + var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; + return apply(fn, this, args); + } + return wrapper; + } + + /** + * Creates a `_.find` or `_.findLast` function. 
+ * + * @private + * @param {Function} findIndexFunc The function to find the collection index. + * @returns {Function} Returns the new find function. + */ + function createFind(findIndexFunc) { + return function(collection, predicate, fromIndex) { + var iterable = Object(collection); + if (!isArrayLike(collection)) { + var iteratee = getIteratee(predicate, 3); + collection = keys(collection); + predicate = function(key) { return iteratee(iterable[key], key, iterable); }; + } + var index = findIndexFunc(collection, predicate, fromIndex); + return index > -1 ? iterable[iteratee ? collection[index] : index] : undefined; + }; + } + + /** + * Creates a `_.flow` or `_.flowRight` function. + * + * @private + * @param {boolean} [fromRight] Specify iterating from right to left. + * @returns {Function} Returns the new flow function. + */ + function createFlow(fromRight) { + return flatRest(function(funcs) { + var length = funcs.length, + index = length, + prereq = LodashWrapper.prototype.thru; + + if (fromRight) { + funcs.reverse(); + } + while (index--) { + var func = funcs[index]; + if (typeof func != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + if (prereq && !wrapper && getFuncName(func) == 'wrapper') { + var wrapper = new LodashWrapper([], true); + } + } + index = wrapper ? index : length; + while (++index < length) { + func = funcs[index]; + + var funcName = getFuncName(func), + data = funcName == 'wrapper' ? getData(func) : undefined; + + if (data && isLaziable(data[0]) && + data[1] == (WRAP_ARY_FLAG | WRAP_CURRY_FLAG | WRAP_PARTIAL_FLAG | WRAP_REARG_FLAG) && + !data[4].length && data[9] == 1 + ) { + wrapper = wrapper[getFuncName(data[0])].apply(wrapper, data[3]); + } else { + wrapper = (func.length == 1 && isLaziable(func)) + ? 
wrapper[funcName]() + : wrapper.thru(func); + } + } + return function() { + var args = arguments, + value = args[0]; + + if (wrapper && args.length == 1 && isArray(value)) { + return wrapper.plant(value).value(); + } + var index = 0, + result = length ? funcs[index].apply(this, args) : value; + + while (++index < length) { + result = funcs[index].call(this, result); + } + return result; + }; + }); + } + + /** + * Creates a function that wraps `func` to invoke it with optional `this` + * binding of `thisArg`, partial application, and currying. + * + * @private + * @param {Function|string} func The function or method name to wrap. + * @param {number} bitmask The bitmask flags. See `createWrap` for more details. + * @param {*} [thisArg] The `this` binding of `func`. + * @param {Array} [partials] The arguments to prepend to those provided to + * the new function. + * @param {Array} [holders] The `partials` placeholder indexes. + * @param {Array} [partialsRight] The arguments to append to those provided + * to the new function. + * @param {Array} [holdersRight] The `partialsRight` placeholder indexes. + * @param {Array} [argPos] The argument positions of the new function. + * @param {number} [ary] The arity cap of `func`. + * @param {number} [arity] The arity of `func`. + * @returns {Function} Returns the new wrapped function. + */ + function createHybrid(func, bitmask, thisArg, partials, holders, partialsRight, holdersRight, argPos, ary, arity) { + var isAry = bitmask & WRAP_ARY_FLAG, + isBind = bitmask & WRAP_BIND_FLAG, + isBindKey = bitmask & WRAP_BIND_KEY_FLAG, + isCurried = bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG), + isFlip = bitmask & WRAP_FLIP_FLAG, + Ctor = isBindKey ? 
undefined : createCtor(func); + + function wrapper() { + var length = arguments.length, + args = Array(length), + index = length; + + while (index--) { + args[index] = arguments[index]; + } + if (isCurried) { + var placeholder = getHolder(wrapper), + holdersCount = countHolders(args, placeholder); + } + if (partials) { + args = composeArgs(args, partials, holders, isCurried); + } + if (partialsRight) { + args = composeArgsRight(args, partialsRight, holdersRight, isCurried); + } + length -= holdersCount; + if (isCurried && length < arity) { + var newHolders = replaceHolders(args, placeholder); + return createRecurry( + func, bitmask, createHybrid, wrapper.placeholder, thisArg, + args, newHolders, argPos, ary, arity - length + ); + } + var thisBinding = isBind ? thisArg : this, + fn = isBindKey ? thisBinding[func] : func; + + length = args.length; + if (argPos) { + args = reorder(args, argPos); + } else if (isFlip && length > 1) { + args.reverse(); + } + if (isAry && ary < length) { + args.length = ary; + } + if (this && this !== root && this instanceof wrapper) { + fn = Ctor || createCtor(fn); + } + return fn.apply(thisBinding, args); + } + return wrapper; + } + + /** + * Creates a function like `_.invertBy`. + * + * @private + * @param {Function} setter The function to set accumulator values. + * @param {Function} toIteratee The function to resolve iteratees. + * @returns {Function} Returns the new inverter function. + */ + function createInverter(setter, toIteratee) { + return function(object, iteratee) { + return baseInverter(object, setter, toIteratee(iteratee), {}); + }; + } + + /** + * Creates a function that performs a mathematical operation on two values. + * + * @private + * @param {Function} operator The function to perform the operation. + * @param {number} [defaultValue] The value used for `undefined` arguments. + * @returns {Function} Returns the new mathematical operation function. 
+ */ + function createMathOperation(operator, defaultValue) { + return function(value, other) { + var result; + if (value === undefined && other === undefined) { + return defaultValue; + } + if (value !== undefined) { + result = value; + } + if (other !== undefined) { + if (result === undefined) { + return other; + } + if (typeof value == 'string' || typeof other == 'string') { + value = baseToString(value); + other = baseToString(other); + } else { + value = baseToNumber(value); + other = baseToNumber(other); + } + result = operator(value, other); + } + return result; + }; + } + + /** + * Creates a function like `_.over`. + * + * @private + * @param {Function} arrayFunc The function to iterate over iteratees. + * @returns {Function} Returns the new over function. + */ + function createOver(arrayFunc) { + return flatRest(function(iteratees) { + iteratees = arrayMap(iteratees, baseUnary(getIteratee())); + return baseRest(function(args) { + var thisArg = this; + return arrayFunc(iteratees, function(iteratee) { + return apply(iteratee, thisArg, args); + }); + }); + }); + } + + /** + * Creates the padding for `string` based on `length`. The `chars` string + * is truncated if the number of characters exceeds `length`. + * + * @private + * @param {number} length The padding length. + * @param {string} [chars=' '] The string used as padding. + * @returns {string} Returns the padding for `string`. + */ + function createPadding(length, chars) { + chars = chars === undefined ? ' ' : baseToString(chars); + + var charsLength = chars.length; + if (charsLength < 2) { + return charsLength ? baseRepeat(chars, length) : chars; + } + var result = baseRepeat(chars, nativeCeil(length / stringSize(chars))); + return hasUnicode(chars) + ? castSlice(stringToArray(result), 0, length).join('') + : result.slice(0, length); + } + + /** + * Creates a function that wraps `func` to invoke it with the `this` binding + * of `thisArg` and `partials` prepended to the arguments it receives. 
+ * + * @private + * @param {Function} func The function to wrap. + * @param {number} bitmask The bitmask flags. See `createWrap` for more details. + * @param {*} thisArg The `this` binding of `func`. + * @param {Array} partials The arguments to prepend to those provided to + * the new function. + * @returns {Function} Returns the new wrapped function. + */ + function createPartial(func, bitmask, thisArg, partials) { + var isBind = bitmask & WRAP_BIND_FLAG, + Ctor = createCtor(func); + + function wrapper() { + var argsIndex = -1, + argsLength = arguments.length, + leftIndex = -1, + leftLength = partials.length, + args = Array(leftLength + argsLength), + fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; + + while (++leftIndex < leftLength) { + args[leftIndex] = partials[leftIndex]; + } + while (argsLength--) { + args[leftIndex++] = arguments[++argsIndex]; + } + return apply(fn, isBind ? thisArg : this, args); + } + return wrapper; + } + + /** + * Creates a `_.range` or `_.rangeRight` function. + * + * @private + * @param {boolean} [fromRight] Specify iterating from right to left. + * @returns {Function} Returns the new range function. + */ + function createRange(fromRight) { + return function(start, end, step) { + if (step && typeof step != 'number' && isIterateeCall(start, end, step)) { + end = step = undefined; + } + // Ensure the sign of `-0` is preserved. + start = toFinite(start); + if (end === undefined) { + end = start; + start = 0; + } else { + end = toFinite(end); + } + step = step === undefined ? (start < end ? 1 : -1) : toFinite(step); + return baseRange(start, end, step, fromRight); + }; + } + + /** + * Creates a function that performs a relational operation on two values. + * + * @private + * @param {Function} operator The function to perform the operation. + * @returns {Function} Returns the new relational operation function. 
+ */ + function createRelationalOperation(operator) { + return function(value, other) { + if (!(typeof value == 'string' && typeof other == 'string')) { + value = toNumber(value); + other = toNumber(other); + } + return operator(value, other); + }; + } + + /** + * Creates a function that wraps `func` to continue currying. + * + * @private + * @param {Function} func The function to wrap. + * @param {number} bitmask The bitmask flags. See `createWrap` for more details. + * @param {Function} wrapFunc The function to create the `func` wrapper. + * @param {*} placeholder The placeholder value. + * @param {*} [thisArg] The `this` binding of `func`. + * @param {Array} [partials] The arguments to prepend to those provided to + * the new function. + * @param {Array} [holders] The `partials` placeholder indexes. + * @param {Array} [argPos] The argument positions of the new function. + * @param {number} [ary] The arity cap of `func`. + * @param {number} [arity] The arity of `func`. + * @returns {Function} Returns the new wrapped function. + */ + function createRecurry(func, bitmask, wrapFunc, placeholder, thisArg, partials, holders, argPos, ary, arity) { + var isCurry = bitmask & WRAP_CURRY_FLAG, + newHolders = isCurry ? holders : undefined, + newHoldersRight = isCurry ? undefined : holders, + newPartials = isCurry ? partials : undefined, + newPartialsRight = isCurry ? undefined : partials; + + bitmask |= (isCurry ? WRAP_PARTIAL_FLAG : WRAP_PARTIAL_RIGHT_FLAG); + bitmask &= ~(isCurry ? 
WRAP_PARTIAL_RIGHT_FLAG : WRAP_PARTIAL_FLAG); + + if (!(bitmask & WRAP_CURRY_BOUND_FLAG)) { + bitmask &= ~(WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG); + } + var newData = [ + func, bitmask, thisArg, newPartials, newHolders, newPartialsRight, + newHoldersRight, argPos, ary, arity + ]; + + var result = wrapFunc.apply(undefined, newData); + if (isLaziable(func)) { + setData(result, newData); + } + result.placeholder = placeholder; + return setWrapToString(result, func, bitmask); + } + + /** + * Creates a function like `_.round`. + * + * @private + * @param {string} methodName The name of the `Math` method to use when rounding. + * @returns {Function} Returns the new round function. + */ + function createRound(methodName) { + var func = Math[methodName]; + return function(number, precision) { + number = toNumber(number); + precision = precision == null ? 0 : nativeMin(toInteger(precision), 292); + if (precision) { + // Shift with exponential notation to avoid floating-point issues. + // See [MDN](https://mdn.io/round#Examples) for more details. + var pair = (toString(number) + 'e').split('e'), + value = func(pair[0] + 'e' + (+pair[1] + precision)); + + pair = (toString(value) + 'e').split('e'); + return +(pair[0] + 'e' + (+pair[1] - precision)); + } + return func(number); + }; + } + + /** + * Creates a set object of `values`. + * + * @private + * @param {Array} values The values to add to the set. + * @returns {Object} Returns the new set. + */ + var createSet = !(Set && (1 / setToArray(new Set([,-0]))[1]) == INFINITY) ? noop : function(values) { + return new Set(values); + }; + + /** + * Creates a `_.toPairs` or `_.toPairsIn` function. + * + * @private + * @param {Function} keysFunc The function to get the keys of a given object. + * @returns {Function} Returns the new pairs function. 
+ */ + function createToPairs(keysFunc) { + return function(object) { + var tag = getTag(object); + if (tag == mapTag) { + return mapToArray(object); + } + if (tag == setTag) { + return setToPairs(object); + } + return baseToPairs(object, keysFunc(object)); + }; + } + + /** + * Creates a function that either curries or invokes `func` with optional + * `this` binding and partially applied arguments. + * + * @private + * @param {Function|string} func The function or method name to wrap. + * @param {number} bitmask The bitmask flags. + * 1 - `_.bind` + * 2 - `_.bindKey` + * 4 - `_.curry` or `_.curryRight` of a bound function + * 8 - `_.curry` + * 16 - `_.curryRight` + * 32 - `_.partial` + * 64 - `_.partialRight` + * 128 - `_.rearg` + * 256 - `_.ary` + * 512 - `_.flip` + * @param {*} [thisArg] The `this` binding of `func`. + * @param {Array} [partials] The arguments to be partially applied. + * @param {Array} [holders] The `partials` placeholder indexes. + * @param {Array} [argPos] The argument positions of the new function. + * @param {number} [ary] The arity cap of `func`. + * @param {number} [arity] The arity of `func`. + * @returns {Function} Returns the new wrapped function. + */ + function createWrap(func, bitmask, thisArg, partials, holders, argPos, ary, arity) { + var isBindKey = bitmask & WRAP_BIND_KEY_FLAG; + if (!isBindKey && typeof func != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + var length = partials ? partials.length : 0; + if (!length) { + bitmask &= ~(WRAP_PARTIAL_FLAG | WRAP_PARTIAL_RIGHT_FLAG); + partials = holders = undefined; + } + ary = ary === undefined ? ary : nativeMax(toInteger(ary), 0); + arity = arity === undefined ? arity : toInteger(arity); + length -= holders ? holders.length : 0; + + if (bitmask & WRAP_PARTIAL_RIGHT_FLAG) { + var partialsRight = partials, + holdersRight = holders; + + partials = holders = undefined; + } + var data = isBindKey ? 
undefined : getData(func); + + var newData = [ + func, bitmask, thisArg, partials, holders, partialsRight, holdersRight, + argPos, ary, arity + ]; + + if (data) { + mergeData(newData, data); + } + func = newData[0]; + bitmask = newData[1]; + thisArg = newData[2]; + partials = newData[3]; + holders = newData[4]; + arity = newData[9] = newData[9] === undefined + ? (isBindKey ? 0 : func.length) + : nativeMax(newData[9] - length, 0); + + if (!arity && bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG)) { + bitmask &= ~(WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG); + } + if (!bitmask || bitmask == WRAP_BIND_FLAG) { + var result = createBind(func, bitmask, thisArg); + } else if (bitmask == WRAP_CURRY_FLAG || bitmask == WRAP_CURRY_RIGHT_FLAG) { + result = createCurry(func, bitmask, arity); + } else if ((bitmask == WRAP_PARTIAL_FLAG || bitmask == (WRAP_BIND_FLAG | WRAP_PARTIAL_FLAG)) && !holders.length) { + result = createPartial(func, bitmask, thisArg, partials); + } else { + result = createHybrid.apply(undefined, newData); + } + var setter = data ? baseSetData : setData; + return setWrapToString(setter(result, newData), func, bitmask); + } + + /** + * Used by `_.defaults` to customize its `_.assignIn` use to assign properties + * of source objects to the destination object for all destination properties + * that resolve to `undefined`. + * + * @private + * @param {*} objValue The destination value. + * @param {*} srcValue The source value. + * @param {string} key The key of the property to assign. + * @param {Object} object The parent object of `objValue`. + * @returns {*} Returns the value to assign. + */ + function customDefaultsAssignIn(objValue, srcValue, key, object) { + if (objValue === undefined || + (eq(objValue, objectProto[key]) && !hasOwnProperty.call(object, key))) { + return srcValue; + } + return objValue; + } + + /** + * Used by `_.defaultsDeep` to customize its `_.merge` use to merge source + * objects into destination objects that are passed thru. 
+ * + * @private + * @param {*} objValue The destination value. + * @param {*} srcValue The source value. + * @param {string} key The key of the property to merge. + * @param {Object} object The parent object of `objValue`. + * @param {Object} source The parent object of `srcValue`. + * @param {Object} [stack] Tracks traversed source values and their merged + * counterparts. + * @returns {*} Returns the value to assign. + */ + function customDefaultsMerge(objValue, srcValue, key, object, source, stack) { + if (isObject(objValue) && isObject(srcValue)) { + // Recursively merge objects and arrays (susceptible to call stack limits). + stack.set(srcValue, objValue); + baseMerge(objValue, srcValue, undefined, customDefaultsMerge, stack); + stack['delete'](srcValue); + } + return objValue; + } + + /** + * Used by `_.omit` to customize its `_.cloneDeep` use to only clone plain + * objects. + * + * @private + * @param {*} value The value to inspect. + * @param {string} key The key of the property to inspect. + * @returns {*} Returns the uncloned value or `undefined` to defer cloning to `_.cloneDeep`. + */ + function customOmitClone(value) { + return isPlainObject(value) ? undefined : value; + } + + /** + * A specialized version of `baseIsEqualDeep` for arrays with support for + * partial deep comparisons. + * + * @private + * @param {Array} array The array to compare. + * @param {Array} other The other array to compare. + * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. + * @param {Function} customizer The function to customize comparisons. + * @param {Function} equalFunc The function to determine equivalents of values. + * @param {Object} stack Tracks traversed `array` and `other` objects. + * @returns {boolean} Returns `true` if the arrays are equivalent, else `false`. 
+ */ + function equalArrays(array, other, bitmask, customizer, equalFunc, stack) { + var isPartial = bitmask & COMPARE_PARTIAL_FLAG, + arrLength = array.length, + othLength = other.length; + + if (arrLength != othLength && !(isPartial && othLength > arrLength)) { + return false; + } + // Assume cyclic values are equal. + var stacked = stack.get(array); + if (stacked && stack.get(other)) { + return stacked == other; + } + var index = -1, + result = true, + seen = (bitmask & COMPARE_UNORDERED_FLAG) ? new SetCache : undefined; + + stack.set(array, other); + stack.set(other, array); + + // Ignore non-index properties. + while (++index < arrLength) { + var arrValue = array[index], + othValue = other[index]; + + if (customizer) { + var compared = isPartial + ? customizer(othValue, arrValue, index, other, array, stack) + : customizer(arrValue, othValue, index, array, other, stack); + } + if (compared !== undefined) { + if (compared) { + continue; + } + result = false; + break; + } + // Recursively compare arrays (susceptible to call stack limits). + if (seen) { + if (!arraySome(other, function(othValue, othIndex) { + if (!cacheHas(seen, othIndex) && + (arrValue === othValue || equalFunc(arrValue, othValue, bitmask, customizer, stack))) { + return seen.push(othIndex); + } + })) { + result = false; + break; + } + } else if (!( + arrValue === othValue || + equalFunc(arrValue, othValue, bitmask, customizer, stack) + )) { + result = false; + break; + } + } + stack['delete'](array); + stack['delete'](other); + return result; + } + + /** + * A specialized version of `baseIsEqualDeep` for comparing objects of + * the same `toStringTag`. + * + * **Note:** This function only supports comparing values with tags of + * `Boolean`, `Date`, `Error`, `Number`, `RegExp`, or `String`. + * + * @private + * @param {Object} object The object to compare. + * @param {Object} other The other object to compare. + * @param {string} tag The `toStringTag` of the objects to compare. 
+ * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. + * @param {Function} customizer The function to customize comparisons. + * @param {Function} equalFunc The function to determine equivalents of values. + * @param {Object} stack Tracks traversed `object` and `other` objects. + * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. + */ + function equalByTag(object, other, tag, bitmask, customizer, equalFunc, stack) { + switch (tag) { + case dataViewTag: + if ((object.byteLength != other.byteLength) || + (object.byteOffset != other.byteOffset)) { + return false; + } + object = object.buffer; + other = other.buffer; + + case arrayBufferTag: + if ((object.byteLength != other.byteLength) || + !equalFunc(new Uint8Array(object), new Uint8Array(other))) { + return false; + } + return true; + + case boolTag: + case dateTag: + case numberTag: + // Coerce booleans to `1` or `0` and dates to milliseconds. + // Invalid dates are coerced to `NaN`. + return eq(+object, +other); + + case errorTag: + return object.name == other.name && object.message == other.message; + + case regexpTag: + case stringTag: + // Coerce regexes to strings and treat strings, primitives and objects, + // as equal. See http://www.ecma-international.org/ecma-262/7.0/#sec-regexp.prototype.tostring + // for more details. + return object == (other + ''); + + case mapTag: + var convert = mapToArray; + + case setTag: + var isPartial = bitmask & COMPARE_PARTIAL_FLAG; + convert || (convert = setToArray); + + if (object.size != other.size && !isPartial) { + return false; + } + // Assume cyclic values are equal. + var stacked = stack.get(object); + if (stacked) { + return stacked == other; + } + bitmask |= COMPARE_UNORDERED_FLAG; + + // Recursively compare objects (susceptible to call stack limits). 
+ stack.set(object, other); + var result = equalArrays(convert(object), convert(other), bitmask, customizer, equalFunc, stack); + stack['delete'](object); + return result; + + case symbolTag: + if (symbolValueOf) { + return symbolValueOf.call(object) == symbolValueOf.call(other); + } + } + return false; + } + + /** + * A specialized version of `baseIsEqualDeep` for objects with support for + * partial deep comparisons. + * + * @private + * @param {Object} object The object to compare. + * @param {Object} other The other object to compare. + * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. + * @param {Function} customizer The function to customize comparisons. + * @param {Function} equalFunc The function to determine equivalents of values. + * @param {Object} stack Tracks traversed `object` and `other` objects. + * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. + */ + function equalObjects(object, other, bitmask, customizer, equalFunc, stack) { + var isPartial = bitmask & COMPARE_PARTIAL_FLAG, + objProps = getAllKeys(object), + objLength = objProps.length, + othProps = getAllKeys(other), + othLength = othProps.length; + + if (objLength != othLength && !isPartial) { + return false; + } + var index = objLength; + while (index--) { + var key = objProps[index]; + if (!(isPartial ? key in other : hasOwnProperty.call(other, key))) { + return false; + } + } + // Assume cyclic values are equal. + var stacked = stack.get(object); + if (stacked && stack.get(other)) { + return stacked == other; + } + var result = true; + stack.set(object, other); + stack.set(other, object); + + var skipCtor = isPartial; + while (++index < objLength) { + key = objProps[index]; + var objValue = object[key], + othValue = other[key]; + + if (customizer) { + var compared = isPartial + ? 
customizer(othValue, objValue, key, other, object, stack) + : customizer(objValue, othValue, key, object, other, stack); + } + // Recursively compare objects (susceptible to call stack limits). + if (!(compared === undefined + ? (objValue === othValue || equalFunc(objValue, othValue, bitmask, customizer, stack)) + : compared + )) { + result = false; + break; + } + skipCtor || (skipCtor = key == 'constructor'); + } + if (result && !skipCtor) { + var objCtor = object.constructor, + othCtor = other.constructor; + + // Non `Object` object instances with different constructors are not equal. + if (objCtor != othCtor && + ('constructor' in object && 'constructor' in other) && + !(typeof objCtor == 'function' && objCtor instanceof objCtor && + typeof othCtor == 'function' && othCtor instanceof othCtor)) { + result = false; + } + } + stack['delete'](object); + stack['delete'](other); + return result; + } + + /** + * A specialized version of `baseRest` which flattens the rest array. + * + * @private + * @param {Function} func The function to apply a rest parameter to. + * @returns {Function} Returns the new function. + */ + function flatRest(func) { + return setToString(overRest(func, undefined, flatten), func + ''); + } + + /** + * Creates an array of own enumerable property names and symbols of `object`. + * + * @private + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names and symbols. + */ + function getAllKeys(object) { + return baseGetAllKeys(object, keys, getSymbols); + } + + /** + * Creates an array of own and inherited enumerable property names and + * symbols of `object`. + * + * @private + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names and symbols. + */ + function getAllKeysIn(object) { + return baseGetAllKeys(object, keysIn, getSymbolsIn); + } + + /** + * Gets metadata for `func`. + * + * @private + * @param {Function} func The function to query. 
+ * @returns {*} Returns the metadata for `func`. + */ + var getData = !metaMap ? noop : function(func) { + return metaMap.get(func); + }; + + /** + * Gets the name of `func`. + * + * @private + * @param {Function} func The function to query. + * @returns {string} Returns the function name. + */ + function getFuncName(func) { + var result = (func.name + ''), + array = realNames[result], + length = hasOwnProperty.call(realNames, result) ? array.length : 0; + + while (length--) { + var data = array[length], + otherFunc = data.func; + if (otherFunc == null || otherFunc == func) { + return data.name; + } + } + return result; + } + + /** + * Gets the argument placeholder value for `func`. + * + * @private + * @param {Function} func The function to inspect. + * @returns {*} Returns the placeholder value. + */ + function getHolder(func) { + var object = hasOwnProperty.call(lodash, 'placeholder') ? lodash : func; + return object.placeholder; + } + + /** + * Gets the appropriate "iteratee" function. If `_.iteratee` is customized, + * this function returns the custom method, otherwise it returns `baseIteratee`. + * If arguments are provided, the chosen function is invoked with them and + * its result is returned. + * + * @private + * @param {*} [value] The value to convert to an iteratee. + * @param {number} [arity] The arity of the created iteratee. + * @returns {Function} Returns the chosen function or its result. + */ + function getIteratee() { + var result = lodash.iteratee || iteratee; + result = result === iteratee ? baseIteratee : result; + return arguments.length ? result(arguments[0], arguments[1]) : result; + } + + /** + * Gets the data for `map`. + * + * @private + * @param {Object} map The map to query. + * @param {string} key The reference key. + * @returns {*} Returns the map data. + */ + function getMapData(map, key) { + var data = map.__data__; + return isKeyable(key) + ? data[typeof key == 'string' ? 
'string' : 'hash'] + : data.map; + } + + /** + * Gets the property names, values, and compare flags of `object`. + * + * @private + * @param {Object} object The object to query. + * @returns {Array} Returns the match data of `object`. + */ + function getMatchData(object) { + var result = keys(object), + length = result.length; + + while (length--) { + var key = result[length], + value = object[key]; + + result[length] = [key, value, isStrictComparable(value)]; + } + return result; + } + + /** + * Gets the native function at `key` of `object`. + * + * @private + * @param {Object} object The object to query. + * @param {string} key The key of the method to get. + * @returns {*} Returns the function if it's native, else `undefined`. + */ + function getNative(object, key) { + var value = getValue(object, key); + return baseIsNative(value) ? value : undefined; + } + + /** + * A specialized version of `baseGetTag` which ignores `Symbol.toStringTag` values. + * + * @private + * @param {*} value The value to query. + * @returns {string} Returns the raw `toStringTag`. + */ + function getRawTag(value) { + var isOwn = hasOwnProperty.call(value, symToStringTag), + tag = value[symToStringTag]; + + try { + value[symToStringTag] = undefined; + var unmasked = true; + } catch (e) {} + + var result = nativeObjectToString.call(value); + if (unmasked) { + if (isOwn) { + value[symToStringTag] = tag; + } else { + delete value[symToStringTag]; + } + } + return result; + } + + /** + * Creates an array of the own enumerable symbols of `object`. + * + * @private + * @param {Object} object The object to query. + * @returns {Array} Returns the array of symbols. + */ + var getSymbols = !nativeGetSymbols ? 
stubArray : function(object) { + if (object == null) { + return []; + } + object = Object(object); + return arrayFilter(nativeGetSymbols(object), function(symbol) { + return propertyIsEnumerable.call(object, symbol); + }); + }; + + /** + * Creates an array of the own and inherited enumerable symbols of `object`. + * + * @private + * @param {Object} object The object to query. + * @returns {Array} Returns the array of symbols. + */ + var getSymbolsIn = !nativeGetSymbols ? stubArray : function(object) { + var result = []; + while (object) { + arrayPush(result, getSymbols(object)); + object = getPrototype(object); + } + return result; + }; + + /** + * Gets the `toStringTag` of `value`. + * + * @private + * @param {*} value The value to query. + * @returns {string} Returns the `toStringTag`. + */ + var getTag = baseGetTag; + + // Fallback for data views, maps, sets, and weak maps in IE 11 and promises in Node.js < 6. + if ((DataView && getTag(new DataView(new ArrayBuffer(1))) != dataViewTag) || + (Map && getTag(new Map) != mapTag) || + (Promise && getTag(Promise.resolve()) != promiseTag) || + (Set && getTag(new Set) != setTag) || + (WeakMap && getTag(new WeakMap) != weakMapTag)) { + getTag = function(value) { + var result = baseGetTag(value), + Ctor = result == objectTag ? value.constructor : undefined, + ctorString = Ctor ? toSource(Ctor) : ''; + + if (ctorString) { + switch (ctorString) { + case dataViewCtorString: return dataViewTag; + case mapCtorString: return mapTag; + case promiseCtorString: return promiseTag; + case setCtorString: return setTag; + case weakMapCtorString: return weakMapTag; + } + } + return result; + }; + } + + /** + * Gets the view, applying any `transforms` to the `start` and `end` positions. + * + * @private + * @param {number} start The start of the view. + * @param {number} end The end of the view. + * @param {Array} transforms The transformations to apply to the view. 
+ * @returns {Object} Returns an object containing the `start` and `end` + * positions of the view. + */ + function getView(start, end, transforms) { + var index = -1, + length = transforms.length; + + while (++index < length) { + var data = transforms[index], + size = data.size; + + switch (data.type) { + case 'drop': start += size; break; + case 'dropRight': end -= size; break; + case 'take': end = nativeMin(end, start + size); break; + case 'takeRight': start = nativeMax(start, end - size); break; + } + } + return { 'start': start, 'end': end }; + } + + /** + * Extracts wrapper details from the `source` body comment. + * + * @private + * @param {string} source The source to inspect. + * @returns {Array} Returns the wrapper details. + */ + function getWrapDetails(source) { + var match = source.match(reWrapDetails); + return match ? match[1].split(reSplitDetails) : []; + } + + /** + * Checks if `path` exists on `object`. + * + * @private + * @param {Object} object The object to query. + * @param {Array|string} path The path to check. + * @param {Function} hasFunc The function to check properties. + * @returns {boolean} Returns `true` if `path` exists, else `false`. + */ + function hasPath(object, path, hasFunc) { + path = castPath(path, object); + + var index = -1, + length = path.length, + result = false; + + while (++index < length) { + var key = toKey(path[index]); + if (!(result = object != null && hasFunc(object, key))) { + break; + } + object = object[key]; + } + if (result || ++index != length) { + return result; + } + length = object == null ? 0 : object.length; + return !!length && isLength(length) && isIndex(key, length) && + (isArray(object) || isArguments(object)); + } + + /** + * Initializes an array clone. + * + * @private + * @param {Array} array The array to clone. + * @returns {Array} Returns the initialized clone. 
+ */ + function initCloneArray(array) { + var length = array.length, + result = new array.constructor(length); + + // Add properties assigned by `RegExp#exec`. + if (length && typeof array[0] == 'string' && hasOwnProperty.call(array, 'index')) { + result.index = array.index; + result.input = array.input; + } + return result; + } + + /** + * Initializes an object clone. + * + * @private + * @param {Object} object The object to clone. + * @returns {Object} Returns the initialized clone. + */ + function initCloneObject(object) { + return (typeof object.constructor == 'function' && !isPrototype(object)) + ? baseCreate(getPrototype(object)) + : {}; + } + + /** + * Initializes an object clone based on its `toStringTag`. + * + * **Note:** This function only supports cloning values with tags of + * `Boolean`, `Date`, `Error`, `Map`, `Number`, `RegExp`, `Set`, or `String`. + * + * @private + * @param {Object} object The object to clone. + * @param {string} tag The `toStringTag` of the object to clone. + * @param {boolean} [isDeep] Specify a deep clone. + * @returns {Object} Returns the initialized clone. + */ + function initCloneByTag(object, tag, isDeep) { + var Ctor = object.constructor; + switch (tag) { + case arrayBufferTag: + return cloneArrayBuffer(object); + + case boolTag: + case dateTag: + return new Ctor(+object); + + case dataViewTag: + return cloneDataView(object, isDeep); + + case float32Tag: case float64Tag: + case int8Tag: case int16Tag: case int32Tag: + case uint8Tag: case uint8ClampedTag: case uint16Tag: case uint32Tag: + return cloneTypedArray(object, isDeep); + + case mapTag: + return new Ctor; + + case numberTag: + case stringTag: + return new Ctor(object); + + case regexpTag: + return cloneRegExp(object); + + case setTag: + return new Ctor; + + case symbolTag: + return cloneSymbol(object); + } + } + + /** + * Inserts wrapper `details` in a comment at the top of the `source` body. + * + * @private + * @param {string} source The source to modify. 
+ * @returns {Array} details The details to insert. + * @returns {string} Returns the modified source. + */ + function insertWrapDetails(source, details) { + var length = details.length; + if (!length) { + return source; + } + var lastIndex = length - 1; + details[lastIndex] = (length > 1 ? '& ' : '') + details[lastIndex]; + details = details.join(length > 2 ? ', ' : ' '); + return source.replace(reWrapComment, '{\n/* [wrapped with ' + details + '] */\n'); + } + + /** + * Checks if `value` is a flattenable `arguments` object or array. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is flattenable, else `false`. + */ + function isFlattenable(value) { + return isArray(value) || isArguments(value) || + !!(spreadableSymbol && value && value[spreadableSymbol]); + } + + /** + * Checks if `value` is a valid array-like index. + * + * @private + * @param {*} value The value to check. + * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index. + * @returns {boolean} Returns `true` if `value` is a valid index, else `false`. + */ + function isIndex(value, length) { + var type = typeof value; + length = length == null ? MAX_SAFE_INTEGER : length; + + return !!length && + (type == 'number' || + (type != 'symbol' && reIsUint.test(value))) && + (value > -1 && value % 1 == 0 && value < length); + } + + /** + * Checks if the given arguments are from an iteratee call. + * + * @private + * @param {*} value The potential iteratee value argument. + * @param {*} index The potential iteratee index or key argument. + * @param {*} object The potential iteratee object argument. + * @returns {boolean} Returns `true` if the arguments are from an iteratee call, + * else `false`. + */ + function isIterateeCall(value, index, object) { + if (!isObject(object)) { + return false; + } + var type = typeof index; + if (type == 'number' + ? 
(isArrayLike(object) && isIndex(index, object.length)) + : (type == 'string' && index in object) + ) { + return eq(object[index], value); + } + return false; + } + + /** + * Checks if `value` is a property name and not a property path. + * + * @private + * @param {*} value The value to check. + * @param {Object} [object] The object to query keys on. + * @returns {boolean} Returns `true` if `value` is a property name, else `false`. + */ + function isKey(value, object) { + if (isArray(value)) { + return false; + } + var type = typeof value; + if (type == 'number' || type == 'symbol' || type == 'boolean' || + value == null || isSymbol(value)) { + return true; + } + return reIsPlainProp.test(value) || !reIsDeepProp.test(value) || + (object != null && value in Object(object)); + } + + /** + * Checks if `value` is suitable for use as unique object key. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is suitable, else `false`. + */ + function isKeyable(value) { + var type = typeof value; + return (type == 'string' || type == 'number' || type == 'symbol' || type == 'boolean') + ? (value !== '__proto__') + : (value === null); + } + + /** + * Checks if `func` has a lazy counterpart. + * + * @private + * @param {Function} func The function to check. + * @returns {boolean} Returns `true` if `func` has a lazy counterpart, + * else `false`. + */ + function isLaziable(func) { + var funcName = getFuncName(func), + other = lodash[funcName]; + + if (typeof other != 'function' || !(funcName in LazyWrapper.prototype)) { + return false; + } + if (func === other) { + return true; + } + var data = getData(other); + return !!data && func === data[0]; + } + + /** + * Checks if `func` has its source masked. + * + * @private + * @param {Function} func The function to check. + * @returns {boolean} Returns `true` if `func` is masked, else `false`. 
+ */ + function isMasked(func) { + return !!maskSrcKey && (maskSrcKey in func); + } + + /** + * Checks if `func` is capable of being masked. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `func` is maskable, else `false`. + */ + var isMaskable = coreJsData ? isFunction : stubFalse; + + /** + * Checks if `value` is likely a prototype object. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a prototype, else `false`. + */ + function isPrototype(value) { + var Ctor = value && value.constructor, + proto = (typeof Ctor == 'function' && Ctor.prototype) || objectProto; + + return value === proto; + } + + /** + * Checks if `value` is suitable for strict equality comparisons, i.e. `===`. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` if suitable for strict + * equality comparisons, else `false`. + */ + function isStrictComparable(value) { + return value === value && !isObject(value); + } + + /** + * A specialized version of `matchesProperty` for source values suitable + * for strict equality comparisons, i.e. `===`. + * + * @private + * @param {string} key The key of the property to get. + * @param {*} srcValue The value to match. + * @returns {Function} Returns the new spec function. + */ + function matchesStrictComparable(key, srcValue) { + return function(object) { + if (object == null) { + return false; + } + return object[key] === srcValue && + (srcValue !== undefined || (key in Object(object))); + }; + } + + /** + * A specialized version of `_.memoize` which clears the memoized function's + * cache when it exceeds `MAX_MEMOIZE_SIZE`. + * + * @private + * @param {Function} func The function to have its output memoized. + * @returns {Function} Returns the new memoized function. 
+ */ + function memoizeCapped(func) { + var result = memoize(func, function(key) { + if (cache.size === MAX_MEMOIZE_SIZE) { + cache.clear(); + } + return key; + }); + + var cache = result.cache; + return result; + } + + /** + * Merges the function metadata of `source` into `data`. + * + * Merging metadata reduces the number of wrappers used to invoke a function. + * This is possible because methods like `_.bind`, `_.curry`, and `_.partial` + * may be applied regardless of execution order. Methods like `_.ary` and + * `_.rearg` modify function arguments, making the order in which they are + * executed important, preventing the merging of metadata. However, we make + * an exception for a safe combined case where curried functions have `_.ary` + * and or `_.rearg` applied. + * + * @private + * @param {Array} data The destination metadata. + * @param {Array} source The source metadata. + * @returns {Array} Returns `data`. + */ + function mergeData(data, source) { + var bitmask = data[1], + srcBitmask = source[1], + newBitmask = bitmask | srcBitmask, + isCommon = newBitmask < (WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG | WRAP_ARY_FLAG); + + var isCombo = + ((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_CURRY_FLAG)) || + ((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_REARG_FLAG) && (data[7].length <= source[8])) || + ((srcBitmask == (WRAP_ARY_FLAG | WRAP_REARG_FLAG)) && (source[7].length <= source[8]) && (bitmask == WRAP_CURRY_FLAG)); + + // Exit early if metadata can't be merged. + if (!(isCommon || isCombo)) { + return data; + } + // Use source `thisArg` if available. + if (srcBitmask & WRAP_BIND_FLAG) { + data[2] = source[2]; + // Set when currying a bound function. + newBitmask |= bitmask & WRAP_BIND_FLAG ? 0 : WRAP_CURRY_BOUND_FLAG; + } + // Compose partial arguments. + var value = source[3]; + if (value) { + var partials = data[3]; + data[3] = partials ? composeArgs(partials, value, source[4]) : value; + data[4] = partials ? 
replaceHolders(data[3], PLACEHOLDER) : source[4]; + } + // Compose partial right arguments. + value = source[5]; + if (value) { + partials = data[5]; + data[5] = partials ? composeArgsRight(partials, value, source[6]) : value; + data[6] = partials ? replaceHolders(data[5], PLACEHOLDER) : source[6]; + } + // Use source `argPos` if available. + value = source[7]; + if (value) { + data[7] = value; + } + // Use source `ary` if it's smaller. + if (srcBitmask & WRAP_ARY_FLAG) { + data[8] = data[8] == null ? source[8] : nativeMin(data[8], source[8]); + } + // Use source `arity` if one is not provided. + if (data[9] == null) { + data[9] = source[9]; + } + // Use source `func` and merge bitmasks. + data[0] = source[0]; + data[1] = newBitmask; + + return data; + } + + /** + * This function is like + * [`Object.keys`](http://ecma-international.org/ecma-262/7.0/#sec-object.keys) + * except that it includes inherited enumerable properties. + * + * @private + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names. + */ + function nativeKeysIn(object) { + var result = []; + if (object != null) { + for (var key in Object(object)) { + result.push(key); + } + } + return result; + } + + /** + * Converts `value` to a string using `Object.prototype.toString`. + * + * @private + * @param {*} value The value to convert. + * @returns {string} Returns the converted string. + */ + function objectToString(value) { + return nativeObjectToString.call(value); + } + + /** + * A specialized version of `baseRest` which transforms the rest array. + * + * @private + * @param {Function} func The function to apply a rest parameter to. + * @param {number} [start=func.length-1] The start position of the rest parameter. + * @param {Function} transform The rest array transform. + * @returns {Function} Returns the new function. + */ + function overRest(func, start, transform) { + start = nativeMax(start === undefined ? 
(func.length - 1) : start, 0); + return function() { + var args = arguments, + index = -1, + length = nativeMax(args.length - start, 0), + array = Array(length); + + while (++index < length) { + array[index] = args[start + index]; + } + index = -1; + var otherArgs = Array(start + 1); + while (++index < start) { + otherArgs[index] = args[index]; + } + otherArgs[start] = transform(array); + return apply(func, this, otherArgs); + }; + } + + /** + * Gets the parent value at `path` of `object`. + * + * @private + * @param {Object} object The object to query. + * @param {Array} path The path to get the parent value of. + * @returns {*} Returns the parent value. + */ + function parent(object, path) { + return path.length < 2 ? object : baseGet(object, baseSlice(path, 0, -1)); + } + + /** + * Reorder `array` according to the specified indexes where the element at + * the first index is assigned as the first element, the element at + * the second index is assigned as the second element, and so on. + * + * @private + * @param {Array} array The array to reorder. + * @param {Array} indexes The arranged array indexes. + * @returns {Array} Returns `array`. + */ + function reorder(array, indexes) { + var arrLength = array.length, + length = nativeMin(indexes.length, arrLength), + oldArray = copyArray(array); + + while (length--) { + var index = indexes[length]; + array[length] = isIndex(index, arrLength) ? oldArray[index] : undefined; + } + return array; + } + + /** + * Sets metadata for `func`. + * + * **Note:** If this function becomes hot, i.e. is invoked a lot in a short + * period of time, it will trip its breaker and transition to an identity + * function to avoid garbage collection pauses in V8. See + * [V8 issue 2070](https://bugs.chromium.org/p/v8/issues/detail?id=2070) + * for more details. + * + * @private + * @param {Function} func The function to associate metadata with. + * @param {*} data The metadata. + * @returns {Function} Returns `func`. 
+ */ + var setData = shortOut(baseSetData); + + /** + * A simple wrapper around the global [`setTimeout`](https://mdn.io/setTimeout). + * + * @private + * @param {Function} func The function to delay. + * @param {number} wait The number of milliseconds to delay invocation. + * @returns {number|Object} Returns the timer id or timeout object. + */ + var setTimeout = ctxSetTimeout || function(func, wait) { + return root.setTimeout(func, wait); + }; + + /** + * Sets the `toString` method of `func` to return `string`. + * + * @private + * @param {Function} func The function to modify. + * @param {Function} string The `toString` result. + * @returns {Function} Returns `func`. + */ + var setToString = shortOut(baseSetToString); + + /** + * Sets the `toString` method of `wrapper` to mimic the source of `reference` + * with wrapper details in a comment at the top of the source body. + * + * @private + * @param {Function} wrapper The function to modify. + * @param {Function} reference The reference function. + * @param {number} bitmask The bitmask flags. See `createWrap` for more details. + * @returns {Function} Returns `wrapper`. + */ + function setWrapToString(wrapper, reference, bitmask) { + var source = (reference + ''); + return setToString(wrapper, insertWrapDetails(source, updateWrapDetails(getWrapDetails(source), bitmask))); + } + + /** + * Creates a function that'll short out and invoke `identity` instead + * of `func` when it's called `HOT_COUNT` or more times in `HOT_SPAN` + * milliseconds. + * + * @private + * @param {Function} func The function to restrict. + * @returns {Function} Returns the new shortable function. 
+ */ + function shortOut(func) { + var count = 0, + lastCalled = 0; + + return function() { + var stamp = nativeNow(), + remaining = HOT_SPAN - (stamp - lastCalled); + + lastCalled = stamp; + if (remaining > 0) { + if (++count >= HOT_COUNT) { + return arguments[0]; + } + } else { + count = 0; + } + return func.apply(undefined, arguments); + }; + } + + /** + * A specialized version of `_.shuffle` which mutates and sets the size of `array`. + * + * @private + * @param {Array} array The array to shuffle. + * @param {number} [size=array.length] The size of `array`. + * @returns {Array} Returns `array`. + */ + function shuffleSelf(array, size) { + var index = -1, + length = array.length, + lastIndex = length - 1; + + size = size === undefined ? length : size; + while (++index < size) { + var rand = baseRandom(index, lastIndex), + value = array[rand]; + + array[rand] = array[index]; + array[index] = value; + } + array.length = size; + return array; + } + + /** + * Converts `string` to a property path array. + * + * @private + * @param {string} string The string to convert. + * @returns {Array} Returns the property path array. + */ + var stringToPath = memoizeCapped(function(string) { + var result = []; + if (string.charCodeAt(0) === 46 /* . */) { + result.push(''); + } + string.replace(rePropName, function(match, number, quote, subString) { + result.push(quote ? subString.replace(reEscapeChar, '$1') : (number || match)); + }); + return result; + }); + + /** + * Converts `value` to a string key if it's not a string or symbol. + * + * @private + * @param {*} value The value to inspect. + * @returns {string|symbol} Returns the key. + */ + function toKey(value) { + if (typeof value == 'string' || isSymbol(value)) { + return value; + } + var result = (value + ''); + return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result; + } + + /** + * Converts `func` to its source code. + * + * @private + * @param {Function} func The function to convert. 
+ * @returns {string} Returns the source code. + */ + function toSource(func) { + if (func != null) { + try { + return funcToString.call(func); + } catch (e) {} + try { + return (func + ''); + } catch (e) {} + } + return ''; + } + + /** + * Updates wrapper `details` based on `bitmask` flags. + * + * @private + * @returns {Array} details The details to modify. + * @param {number} bitmask The bitmask flags. See `createWrap` for more details. + * @returns {Array} Returns `details`. + */ + function updateWrapDetails(details, bitmask) { + arrayEach(wrapFlags, function(pair) { + var value = '_.' + pair[0]; + if ((bitmask & pair[1]) && !arrayIncludes(details, value)) { + details.push(value); + } + }); + return details.sort(); + } + + /** + * Creates a clone of `wrapper`. + * + * @private + * @param {Object} wrapper The wrapper to clone. + * @returns {Object} Returns the cloned wrapper. + */ + function wrapperClone(wrapper) { + if (wrapper instanceof LazyWrapper) { + return wrapper.clone(); + } + var result = new LodashWrapper(wrapper.__wrapped__, wrapper.__chain__); + result.__actions__ = copyArray(wrapper.__actions__); + result.__index__ = wrapper.__index__; + result.__values__ = wrapper.__values__; + return result; + } + + /*------------------------------------------------------------------------*/ + + /** + * Creates an array of elements split into groups the length of `size`. + * If `array` can't be split evenly, the final chunk will be the remaining + * elements. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to process. + * @param {number} [size=1] The length of each chunk + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Array} Returns the new array of chunks. 
+ * @example + * + * _.chunk(['a', 'b', 'c', 'd'], 2); + * // => [['a', 'b'], ['c', 'd']] + * + * _.chunk(['a', 'b', 'c', 'd'], 3); + * // => [['a', 'b', 'c'], ['d']] + */ + function chunk(array, size, guard) { + if ((guard ? isIterateeCall(array, size, guard) : size === undefined)) { + size = 1; + } else { + size = nativeMax(toInteger(size), 0); + } + var length = array == null ? 0 : array.length; + if (!length || size < 1) { + return []; + } + var index = 0, + resIndex = 0, + result = Array(nativeCeil(length / size)); + + while (index < length) { + result[resIndex++] = baseSlice(array, index, (index += size)); + } + return result; + } + + /** + * Creates an array with all falsey values removed. The values `false`, `null`, + * `0`, `""`, `undefined`, and `NaN` are falsey. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to compact. + * @returns {Array} Returns the new array of filtered values. + * @example + * + * _.compact([0, 1, false, 2, '', 3]); + * // => [1, 2, 3] + */ + function compact(array) { + var index = -1, + length = array == null ? 0 : array.length, + resIndex = 0, + result = []; + + while (++index < length) { + var value = array[index]; + if (value) { + result[resIndex++] = value; + } + } + return result; + } + + /** + * Creates a new array concatenating `array` with any additional arrays + * and/or values. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to concatenate. + * @param {...*} [values] The values to concatenate. + * @returns {Array} Returns the new concatenated array. 
+ * @example + * + * var array = [1]; + * var other = _.concat(array, 2, [3], [[4]]); + * + * console.log(other); + * // => [1, 2, 3, [4]] + * + * console.log(array); + * // => [1] + */ + function concat() { + var length = arguments.length; + if (!length) { + return []; + } + var args = Array(length - 1), + array = arguments[0], + index = length; + + while (index--) { + args[index - 1] = arguments[index]; + } + return arrayPush(isArray(array) ? copyArray(array) : [array], baseFlatten(args, 1)); + } + + /** + * Creates an array of `array` values not included in the other given arrays + * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * for equality comparisons. The order and references of result values are + * determined by the first array. + * + * **Note:** Unlike `_.pullAll`, this method returns a new array. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {...Array} [values] The values to exclude. + * @returns {Array} Returns the new array of filtered values. + * @see _.without, _.xor + * @example + * + * _.difference([2, 1], [2, 3]); + * // => [1] + */ + var difference = baseRest(function(array, values) { + return isArrayLikeObject(array) + ? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true)) + : []; + }); + + /** + * This method is like `_.difference` except that it accepts `iteratee` which + * is invoked for each element of `array` and `values` to generate the criterion + * by which they're compared. The order and references of result values are + * determined by the first array. The iteratee is invoked with one argument: + * (value). + * + * **Note:** Unlike `_.pullAllBy`, this method returns a new array. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {...Array} [values] The values to exclude. 
+ * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {Array} Returns the new array of filtered values. + * @example + * + * _.differenceBy([2.1, 1.2], [2.3, 3.4], Math.floor); + * // => [1.2] + * + * // The `_.property` iteratee shorthand. + * _.differenceBy([{ 'x': 2 }, { 'x': 1 }], [{ 'x': 1 }], 'x'); + * // => [{ 'x': 2 }] + */ + var differenceBy = baseRest(function(array, values) { + var iteratee = last(values); + if (isArrayLikeObject(iteratee)) { + iteratee = undefined; + } + return isArrayLikeObject(array) + ? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), getIteratee(iteratee, 2)) + : []; + }); + + /** + * This method is like `_.difference` except that it accepts `comparator` + * which is invoked to compare elements of `array` to `values`. The order and + * references of result values are determined by the first array. The comparator + * is invoked with two arguments: (arrVal, othVal). + * + * **Note:** Unlike `_.pullAllWith`, this method returns a new array. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {...Array} [values] The values to exclude. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns the new array of filtered values. + * @example + * + * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]; + * + * _.differenceWith(objects, [{ 'x': 1, 'y': 2 }], _.isEqual); + * // => [{ 'x': 2, 'y': 1 }] + */ + var differenceWith = baseRest(function(array, values) { + var comparator = last(values); + if (isArrayLikeObject(comparator)) { + comparator = undefined; + } + return isArrayLikeObject(array) + ? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), undefined, comparator) + : []; + }); + + /** + * Creates a slice of `array` with `n` elements dropped from the beginning. 
+ * + * @static + * @memberOf _ + * @since 0.5.0 + * @category Array + * @param {Array} array The array to query. + * @param {number} [n=1] The number of elements to drop. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Array} Returns the slice of `array`. + * @example + * + * _.drop([1, 2, 3]); + * // => [2, 3] + * + * _.drop([1, 2, 3], 2); + * // => [3] + * + * _.drop([1, 2, 3], 5); + * // => [] + * + * _.drop([1, 2, 3], 0); + * // => [1, 2, 3] + */ + function drop(array, n, guard) { + var length = array == null ? 0 : array.length; + if (!length) { + return []; + } + n = (guard || n === undefined) ? 1 : toInteger(n); + return baseSlice(array, n < 0 ? 0 : n, length); + } + + /** + * Creates a slice of `array` with `n` elements dropped from the end. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to query. + * @param {number} [n=1] The number of elements to drop. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Array} Returns the slice of `array`. + * @example + * + * _.dropRight([1, 2, 3]); + * // => [1, 2] + * + * _.dropRight([1, 2, 3], 2); + * // => [1] + * + * _.dropRight([1, 2, 3], 5); + * // => [] + * + * _.dropRight([1, 2, 3], 0); + * // => [1, 2, 3] + */ + function dropRight(array, n, guard) { + var length = array == null ? 0 : array.length; + if (!length) { + return []; + } + n = (guard || n === undefined) ? 1 : toInteger(n); + n = length - n; + return baseSlice(array, 0, n < 0 ? 0 : n); + } + + /** + * Creates a slice of `array` excluding elements dropped from the end. + * Elements are dropped until `predicate` returns falsey. The predicate is + * invoked with three arguments: (value, index, array). + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to query. + * @param {Function} [predicate=_.identity] The function invoked per iteration. 
+ * @returns {Array} Returns the slice of `array`. + * @example + * + * var users = [ + * { 'user': 'barney', 'active': true }, + * { 'user': 'fred', 'active': false }, + * { 'user': 'pebbles', 'active': false } + * ]; + * + * _.dropRightWhile(users, function(o) { return !o.active; }); + * // => objects for ['barney'] + * + * // The `_.matches` iteratee shorthand. + * _.dropRightWhile(users, { 'user': 'pebbles', 'active': false }); + * // => objects for ['barney', 'fred'] + * + * // The `_.matchesProperty` iteratee shorthand. + * _.dropRightWhile(users, ['active', false]); + * // => objects for ['barney'] + * + * // The `_.property` iteratee shorthand. + * _.dropRightWhile(users, 'active'); + * // => objects for ['barney', 'fred', 'pebbles'] + */ + function dropRightWhile(array, predicate) { + return (array && array.length) + ? baseWhile(array, getIteratee(predicate, 3), true, true) + : []; + } + + /** + * Creates a slice of `array` excluding elements dropped from the beginning. + * Elements are dropped until `predicate` returns falsey. The predicate is + * invoked with three arguments: (value, index, array). + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to query. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {Array} Returns the slice of `array`. + * @example + * + * var users = [ + * { 'user': 'barney', 'active': false }, + * { 'user': 'fred', 'active': false }, + * { 'user': 'pebbles', 'active': true } + * ]; + * + * _.dropWhile(users, function(o) { return !o.active; }); + * // => objects for ['pebbles'] + * + * // The `_.matches` iteratee shorthand. + * _.dropWhile(users, { 'user': 'barney', 'active': false }); + * // => objects for ['fred', 'pebbles'] + * + * // The `_.matchesProperty` iteratee shorthand. + * _.dropWhile(users, ['active', false]); + * // => objects for ['pebbles'] + * + * // The `_.property` iteratee shorthand. 
+ * _.dropWhile(users, 'active'); + * // => objects for ['barney', 'fred', 'pebbles'] + */ + function dropWhile(array, predicate) { + return (array && array.length) + ? baseWhile(array, getIteratee(predicate, 3), true) + : []; + } + + /** + * Fills elements of `array` with `value` from `start` up to, but not + * including, `end`. + * + * **Note:** This method mutates `array`. + * + * @static + * @memberOf _ + * @since 3.2.0 + * @category Array + * @param {Array} array The array to fill. + * @param {*} value The value to fill `array` with. + * @param {number} [start=0] The start position. + * @param {number} [end=array.length] The end position. + * @returns {Array} Returns `array`. + * @example + * + * var array = [1, 2, 3]; + * + * _.fill(array, 'a'); + * console.log(array); + * // => ['a', 'a', 'a'] + * + * _.fill(Array(3), 2); + * // => [2, 2, 2] + * + * _.fill([4, 6, 8, 10], '*', 1, 3); + * // => [4, '*', '*', 10] + */ + function fill(array, value, start, end) { + var length = array == null ? 0 : array.length; + if (!length) { + return []; + } + if (start && typeof start != 'number' && isIterateeCall(array, value, start)) { + start = 0; + end = length; + } + return baseFill(array, value, start, end); + } + + /** + * This method is like `_.find` except that it returns the index of the first + * element `predicate` returns truthy for instead of the element itself. + * + * @static + * @memberOf _ + * @since 1.1.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @param {number} [fromIndex=0] The index to search from. + * @returns {number} Returns the index of the found element, else `-1`. 
+ * @example + * + * var users = [ + * { 'user': 'barney', 'active': false }, + * { 'user': 'fred', 'active': false }, + * { 'user': 'pebbles', 'active': true } + * ]; + * + * _.findIndex(users, function(o) { return o.user == 'barney'; }); + * // => 0 + * + * // The `_.matches` iteratee shorthand. + * _.findIndex(users, { 'user': 'fred', 'active': false }); + * // => 1 + * + * // The `_.matchesProperty` iteratee shorthand. + * _.findIndex(users, ['active', false]); + * // => 0 + * + * // The `_.property` iteratee shorthand. + * _.findIndex(users, 'active'); + * // => 2 + */ + function findIndex(array, predicate, fromIndex) { + var length = array == null ? 0 : array.length; + if (!length) { + return -1; + } + var index = fromIndex == null ? 0 : toInteger(fromIndex); + if (index < 0) { + index = nativeMax(length + index, 0); + } + return baseFindIndex(array, getIteratee(predicate, 3), index); + } + + /** + * This method is like `_.findIndex` except that it iterates over elements + * of `collection` from right to left. + * + * @static + * @memberOf _ + * @since 2.0.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @param {number} [fromIndex=array.length-1] The index to search from. + * @returns {number} Returns the index of the found element, else `-1`. + * @example + * + * var users = [ + * { 'user': 'barney', 'active': true }, + * { 'user': 'fred', 'active': false }, + * { 'user': 'pebbles', 'active': false } + * ]; + * + * _.findLastIndex(users, function(o) { return o.user == 'pebbles'; }); + * // => 2 + * + * // The `_.matches` iteratee shorthand. + * _.findLastIndex(users, { 'user': 'barney', 'active': true }); + * // => 0 + * + * // The `_.matchesProperty` iteratee shorthand. + * _.findLastIndex(users, ['active', false]); + * // => 2 + * + * // The `_.property` iteratee shorthand. 
+ * _.findLastIndex(users, 'active'); + * // => 0 + */ + function findLastIndex(array, predicate, fromIndex) { + var length = array == null ? 0 : array.length; + if (!length) { + return -1; + } + var index = length - 1; + if (fromIndex !== undefined) { + index = toInteger(fromIndex); + index = fromIndex < 0 + ? nativeMax(length + index, 0) + : nativeMin(index, length - 1); + } + return baseFindIndex(array, getIteratee(predicate, 3), index, true); + } + + /** + * Flattens `array` a single level deep. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to flatten. + * @returns {Array} Returns the new flattened array. + * @example + * + * _.flatten([1, [2, [3, [4]], 5]]); + * // => [1, 2, [3, [4]], 5] + */ + function flatten(array) { + var length = array == null ? 0 : array.length; + return length ? baseFlatten(array, 1) : []; + } + + /** + * Recursively flattens `array`. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to flatten. + * @returns {Array} Returns the new flattened array. + * @example + * + * _.flattenDeep([1, [2, [3, [4]], 5]]); + * // => [1, 2, 3, 4, 5] + */ + function flattenDeep(array) { + var length = array == null ? 0 : array.length; + return length ? baseFlatten(array, INFINITY) : []; + } + + /** + * Recursively flatten `array` up to `depth` times. + * + * @static + * @memberOf _ + * @since 4.4.0 + * @category Array + * @param {Array} array The array to flatten. + * @param {number} [depth=1] The maximum recursion depth. + * @returns {Array} Returns the new flattened array. + * @example + * + * var array = [1, [2, [3, [4]], 5]]; + * + * _.flattenDepth(array, 1); + * // => [1, 2, [3, [4]], 5] + * + * _.flattenDepth(array, 2); + * // => [1, 2, 3, [4], 5] + */ + function flattenDepth(array, depth) { + var length = array == null ? 0 : array.length; + if (!length) { + return []; + } + depth = depth === undefined ? 
1 : toInteger(depth); + return baseFlatten(array, depth); + } + + /** + * The inverse of `_.toPairs`; this method returns an object composed + * from key-value `pairs`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} pairs The key-value pairs. + * @returns {Object} Returns the new object. + * @example + * + * _.fromPairs([['a', 1], ['b', 2]]); + * // => { 'a': 1, 'b': 2 } + */ + function fromPairs(pairs) { + var index = -1, + length = pairs == null ? 0 : pairs.length, + result = {}; + + while (++index < length) { + var pair = pairs[index]; + result[pair[0]] = pair[1]; + } + return result; + } + + /** + * Gets the first element of `array`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @alias first + * @category Array + * @param {Array} array The array to query. + * @returns {*} Returns the first element of `array`. + * @example + * + * _.head([1, 2, 3]); + * // => 1 + * + * _.head([]); + * // => undefined + */ + function head(array) { + return (array && array.length) ? array[0] : undefined; + } + + /** + * Gets the index at which the first occurrence of `value` is found in `array` + * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * for equality comparisons. If `fromIndex` is negative, it's used as the + * offset from the end of `array`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {*} value The value to search for. + * @param {number} [fromIndex=0] The index to search from. + * @returns {number} Returns the index of the matched value, else `-1`. + * @example + * + * _.indexOf([1, 2, 1, 2], 2); + * // => 1 + * + * // Search from the `fromIndex`. + * _.indexOf([1, 2, 1, 2], 2, 2); + * // => 3 + */ + function indexOf(array, value, fromIndex) { + var length = array == null ? 0 : array.length; + if (!length) { + return -1; + } + var index = fromIndex == null ? 
0 : toInteger(fromIndex); + if (index < 0) { + index = nativeMax(length + index, 0); + } + return baseIndexOf(array, value, index); + } + + /** + * Gets all but the last element of `array`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to query. + * @returns {Array} Returns the slice of `array`. + * @example + * + * _.initial([1, 2, 3]); + * // => [1, 2] + */ + function initial(array) { + var length = array == null ? 0 : array.length; + return length ? baseSlice(array, 0, -1) : []; + } + + /** + * Creates an array of unique values that are included in all given arrays + * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * for equality comparisons. The order and references of result values are + * determined by the first array. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @returns {Array} Returns the new array of intersecting values. + * @example + * + * _.intersection([2, 1], [2, 3]); + * // => [2] + */ + var intersection = baseRest(function(arrays) { + var mapped = arrayMap(arrays, castArrayLikeObject); + return (mapped.length && mapped[0] === arrays[0]) + ? baseIntersection(mapped) + : []; + }); + + /** + * This method is like `_.intersection` except that it accepts `iteratee` + * which is invoked for each element of each `arrays` to generate the criterion + * by which they're compared. The order and references of result values are + * determined by the first array. The iteratee is invoked with one argument: + * (value). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {Array} Returns the new array of intersecting values. 
+ * @example + * + * _.intersectionBy([2.1, 1.2], [2.3, 3.4], Math.floor); + * // => [2.1] + * + * // The `_.property` iteratee shorthand. + * _.intersectionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x'); + * // => [{ 'x': 1 }] + */ + var intersectionBy = baseRest(function(arrays) { + var iteratee = last(arrays), + mapped = arrayMap(arrays, castArrayLikeObject); + + if (iteratee === last(mapped)) { + iteratee = undefined; + } else { + mapped.pop(); + } + return (mapped.length && mapped[0] === arrays[0]) + ? baseIntersection(mapped, getIteratee(iteratee, 2)) + : []; + }); + + /** + * This method is like `_.intersection` except that it accepts `comparator` + * which is invoked to compare elements of `arrays`. The order and references + * of result values are determined by the first array. The comparator is + * invoked with two arguments: (arrVal, othVal). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns the new array of intersecting values. + * @example + * + * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]; + * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }]; + * + * _.intersectionWith(objects, others, _.isEqual); + * // => [{ 'x': 1, 'y': 2 }] + */ + var intersectionWith = baseRest(function(arrays) { + var comparator = last(arrays), + mapped = arrayMap(arrays, castArrayLikeObject); + + comparator = typeof comparator == 'function' ? comparator : undefined; + if (comparator) { + mapped.pop(); + } + return (mapped.length && mapped[0] === arrays[0]) + ? baseIntersection(mapped, undefined, comparator) + : []; + }); + + /** + * Converts all elements in `array` into a string separated by `separator`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to convert. + * @param {string} [separator=','] The element separator. 
+ * @returns {string} Returns the joined string. + * @example + * + * _.join(['a', 'b', 'c'], '~'); + * // => 'a~b~c' + */ + function join(array, separator) { + return array == null ? '' : nativeJoin.call(array, separator); + } + + /** + * Gets the last element of `array`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to query. + * @returns {*} Returns the last element of `array`. + * @example + * + * _.last([1, 2, 3]); + * // => 3 + */ + function last(array) { + var length = array == null ? 0 : array.length; + return length ? array[length - 1] : undefined; + } + + /** + * This method is like `_.indexOf` except that it iterates over elements of + * `array` from right to left. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {*} value The value to search for. + * @param {number} [fromIndex=array.length-1] The index to search from. + * @returns {number} Returns the index of the matched value, else `-1`. + * @example + * + * _.lastIndexOf([1, 2, 1, 2], 2); + * // => 3 + * + * // Search from the `fromIndex`. + * _.lastIndexOf([1, 2, 1, 2], 2, 2); + * // => 1 + */ + function lastIndexOf(array, value, fromIndex) { + var length = array == null ? 0 : array.length; + if (!length) { + return -1; + } + var index = length; + if (fromIndex !== undefined) { + index = toInteger(fromIndex); + index = index < 0 ? nativeMax(length + index, 0) : nativeMin(index, length - 1); + } + return value === value + ? strictLastIndexOf(array, value, index) + : baseFindIndex(array, baseIsNaN, index, true); + } + + /** + * Gets the element at index `n` of `array`. If `n` is negative, the nth + * element from the end is returned. + * + * @static + * @memberOf _ + * @since 4.11.0 + * @category Array + * @param {Array} array The array to query. + * @param {number} [n=0] The index of the element to return. + * @returns {*} Returns the nth element of `array`. 
+ * @example + * + * var array = ['a', 'b', 'c', 'd']; + * + * _.nth(array, 1); + * // => 'b' + * + * _.nth(array, -2); + * // => 'c'; + */ + function nth(array, n) { + return (array && array.length) ? baseNth(array, toInteger(n)) : undefined; + } + + /** + * Removes all given values from `array` using + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * for equality comparisons. + * + * **Note:** Unlike `_.without`, this method mutates `array`. Use `_.remove` + * to remove elements from an array by predicate. + * + * @static + * @memberOf _ + * @since 2.0.0 + * @category Array + * @param {Array} array The array to modify. + * @param {...*} [values] The values to remove. + * @returns {Array} Returns `array`. + * @example + * + * var array = ['a', 'b', 'c', 'a', 'b', 'c']; + * + * _.pull(array, 'a', 'c'); + * console.log(array); + * // => ['b', 'b'] + */ + var pull = baseRest(pullAll); + + /** + * This method is like `_.pull` except that it accepts an array of values to remove. + * + * **Note:** Unlike `_.difference`, this method mutates `array`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to modify. + * @param {Array} values The values to remove. + * @returns {Array} Returns `array`. + * @example + * + * var array = ['a', 'b', 'c', 'a', 'b', 'c']; + * + * _.pullAll(array, ['a', 'c']); + * console.log(array); + * // => ['b', 'b'] + */ + function pullAll(array, values) { + return (array && array.length && values && values.length) + ? basePullAll(array, values) + : array; + } + + /** + * This method is like `_.pullAll` except that it accepts `iteratee` which is + * invoked for each element of `array` and `values` to generate the criterion + * by which they're compared. The iteratee is invoked with one argument: (value). + * + * **Note:** Unlike `_.differenceBy`, this method mutates `array`. 
+ * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to modify. + * @param {Array} values The values to remove. + * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {Array} Returns `array`. + * @example + * + * var array = [{ 'x': 1 }, { 'x': 2 }, { 'x': 3 }, { 'x': 1 }]; + * + * _.pullAllBy(array, [{ 'x': 1 }, { 'x': 3 }], 'x'); + * console.log(array); + * // => [{ 'x': 2 }] + */ + function pullAllBy(array, values, iteratee) { + return (array && array.length && values && values.length) + ? basePullAll(array, values, getIteratee(iteratee, 2)) + : array; + } + + /** + * This method is like `_.pullAll` except that it accepts `comparator` which + * is invoked to compare elements of `array` to `values`. The comparator is + * invoked with two arguments: (arrVal, othVal). + * + * **Note:** Unlike `_.differenceWith`, this method mutates `array`. + * + * @static + * @memberOf _ + * @since 4.6.0 + * @category Array + * @param {Array} array The array to modify. + * @param {Array} values The values to remove. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns `array`. + * @example + * + * var array = [{ 'x': 1, 'y': 2 }, { 'x': 3, 'y': 4 }, { 'x': 5, 'y': 6 }]; + * + * _.pullAllWith(array, [{ 'x': 3, 'y': 4 }], _.isEqual); + * console.log(array); + * // => [{ 'x': 1, 'y': 2 }, { 'x': 5, 'y': 6 }] + */ + function pullAllWith(array, values, comparator) { + return (array && array.length && values && values.length) + ? basePullAll(array, values, undefined, comparator) + : array; + } + + /** + * Removes elements from `array` corresponding to `indexes` and returns an + * array of removed elements. + * + * **Note:** Unlike `_.at`, this method mutates `array`. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to modify. 
+ * @param {...(number|number[])} [indexes] The indexes of elements to remove. + * @returns {Array} Returns the new array of removed elements. + * @example + * + * var array = ['a', 'b', 'c', 'd']; + * var pulled = _.pullAt(array, [1, 3]); + * + * console.log(array); + * // => ['a', 'c'] + * + * console.log(pulled); + * // => ['b', 'd'] + */ + var pullAt = flatRest(function(array, indexes) { + var length = array == null ? 0 : array.length, + result = baseAt(array, indexes); + + basePullAt(array, arrayMap(indexes, function(index) { + return isIndex(index, length) ? +index : index; + }).sort(compareAscending)); + + return result; + }); + + /** + * Removes all elements from `array` that `predicate` returns truthy for + * and returns an array of the removed elements. The predicate is invoked + * with three arguments: (value, index, array). + * + * **Note:** Unlike `_.filter`, this method mutates `array`. Use `_.pull` + * to pull elements from an array by value. + * + * @static + * @memberOf _ + * @since 2.0.0 + * @category Array + * @param {Array} array The array to modify. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {Array} Returns the new array of removed elements. 
+ * @example + * + * var array = [1, 2, 3, 4]; + * var evens = _.remove(array, function(n) { + * return n % 2 == 0; + * }); + * + * console.log(array); + * // => [1, 3] + * + * console.log(evens); + * // => [2, 4] + */ + function remove(array, predicate) { + var result = []; + if (!(array && array.length)) { + return result; + } + var index = -1, + indexes = [], + length = array.length; + + predicate = getIteratee(predicate, 3); + while (++index < length) { + var value = array[index]; + if (predicate(value, index, array)) { + result.push(value); + indexes.push(index); + } + } + basePullAt(array, indexes); + return result; + } + + /** + * Reverses `array` so that the first element becomes the last, the second + * element becomes the second to last, and so on. + * + * **Note:** This method mutates `array` and is based on + * [`Array#reverse`](https://mdn.io/Array/reverse). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to modify. + * @returns {Array} Returns `array`. + * @example + * + * var array = [1, 2, 3]; + * + * _.reverse(array); + * // => [3, 2, 1] + * + * console.log(array); + * // => [3, 2, 1] + */ + function reverse(array) { + return array == null ? array : nativeReverse.call(array); + } + + /** + * Creates a slice of `array` from `start` up to, but not including, `end`. + * + * **Note:** This method is used instead of + * [`Array#slice`](https://mdn.io/Array/slice) to ensure dense arrays are + * returned. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to slice. + * @param {number} [start=0] The start position. + * @param {number} [end=array.length] The end position. + * @returns {Array} Returns the slice of `array`. + */ + function slice(array, start, end) { + var length = array == null ? 
0 : array.length; + if (!length) { + return []; + } + if (end && typeof end != 'number' && isIterateeCall(array, start, end)) { + start = 0; + end = length; + } + else { + start = start == null ? 0 : toInteger(start); + end = end === undefined ? length : toInteger(end); + } + return baseSlice(array, start, end); + } + + /** + * Uses a binary search to determine the lowest index at which `value` + * should be inserted into `array` in order to maintain its sort order. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The sorted array to inspect. + * @param {*} value The value to evaluate. + * @returns {number} Returns the index at which `value` should be inserted + * into `array`. + * @example + * + * _.sortedIndex([30, 50], 40); + * // => 1 + */ + function sortedIndex(array, value) { + return baseSortedIndex(array, value); + } + + /** + * This method is like `_.sortedIndex` except that it accepts `iteratee` + * which is invoked for `value` and each element of `array` to compute their + * sort ranking. The iteratee is invoked with one argument: (value). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The sorted array to inspect. + * @param {*} value The value to evaluate. + * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {number} Returns the index at which `value` should be inserted + * into `array`. + * @example + * + * var objects = [{ 'x': 4 }, { 'x': 5 }]; + * + * _.sortedIndexBy(objects, { 'x': 4 }, function(o) { return o.x; }); + * // => 0 + * + * // The `_.property` iteratee shorthand. + * _.sortedIndexBy(objects, { 'x': 4 }, 'x'); + * // => 0 + */ + function sortedIndexBy(array, value, iteratee) { + return baseSortedIndexBy(array, value, getIteratee(iteratee, 2)); + } + + /** + * This method is like `_.indexOf` except that it performs a binary + * search on a sorted `array`. 
+ * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {*} value The value to search for. + * @returns {number} Returns the index of the matched value, else `-1`. + * @example + * + * _.sortedIndexOf([4, 5, 5, 5, 6], 5); + * // => 1 + */ + function sortedIndexOf(array, value) { + var length = array == null ? 0 : array.length; + if (length) { + var index = baseSortedIndex(array, value); + if (index < length && eq(array[index], value)) { + return index; + } + } + return -1; + } + + /** + * This method is like `_.sortedIndex` except that it returns the highest + * index at which `value` should be inserted into `array` in order to + * maintain its sort order. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The sorted array to inspect. + * @param {*} value The value to evaluate. + * @returns {number} Returns the index at which `value` should be inserted + * into `array`. + * @example + * + * _.sortedLastIndex([4, 5, 5, 5, 6], 5); + * // => 4 + */ + function sortedLastIndex(array, value) { + return baseSortedIndex(array, value, true); + } + + /** + * This method is like `_.sortedLastIndex` except that it accepts `iteratee` + * which is invoked for `value` and each element of `array` to compute their + * sort ranking. The iteratee is invoked with one argument: (value). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The sorted array to inspect. + * @param {*} value The value to evaluate. + * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {number} Returns the index at which `value` should be inserted + * into `array`. + * @example + * + * var objects = [{ 'x': 4 }, { 'x': 5 }]; + * + * _.sortedLastIndexBy(objects, { 'x': 4 }, function(o) { return o.x; }); + * // => 1 + * + * // The `_.property` iteratee shorthand. 
+ * _.sortedLastIndexBy(objects, { 'x': 4 }, 'x'); + * // => 1 + */ + function sortedLastIndexBy(array, value, iteratee) { + return baseSortedIndexBy(array, value, getIteratee(iteratee, 2), true); + } + + /** + * This method is like `_.lastIndexOf` except that it performs a binary + * search on a sorted `array`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {*} value The value to search for. + * @returns {number} Returns the index of the matched value, else `-1`. + * @example + * + * _.sortedLastIndexOf([4, 5, 5, 5, 6], 5); + * // => 3 + */ + function sortedLastIndexOf(array, value) { + var length = array == null ? 0 : array.length; + if (length) { + var index = baseSortedIndex(array, value, true) - 1; + if (eq(array[index], value)) { + return index; + } + } + return -1; + } + + /** + * This method is like `_.uniq` except that it's designed and optimized + * for sorted arrays. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to inspect. + * @returns {Array} Returns the new duplicate free array. + * @example + * + * _.sortedUniq([1, 1, 2]); + * // => [1, 2] + */ + function sortedUniq(array) { + return (array && array.length) + ? baseSortedUniq(array) + : []; + } + + /** + * This method is like `_.uniqBy` except that it's designed and optimized + * for sorted arrays. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {Function} [iteratee] The iteratee invoked per element. + * @returns {Array} Returns the new duplicate free array. + * @example + * + * _.sortedUniqBy([1.1, 1.2, 2.3, 2.4], Math.floor); + * // => [1.1, 2.3] + */ + function sortedUniqBy(array, iteratee) { + return (array && array.length) + ? baseSortedUniq(array, getIteratee(iteratee, 2)) + : []; + } + + /** + * Gets all but the first element of `array`. 
+ * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to query. + * @returns {Array} Returns the slice of `array`. + * @example + * + * _.tail([1, 2, 3]); + * // => [2, 3] + */ + function tail(array) { + var length = array == null ? 0 : array.length; + return length ? baseSlice(array, 1, length) : []; + } + + /** + * Creates a slice of `array` with `n` elements taken from the beginning. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to query. + * @param {number} [n=1] The number of elements to take. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Array} Returns the slice of `array`. + * @example + * + * _.take([1, 2, 3]); + * // => [1] + * + * _.take([1, 2, 3], 2); + * // => [1, 2] + * + * _.take([1, 2, 3], 5); + * // => [1, 2, 3] + * + * _.take([1, 2, 3], 0); + * // => [] + */ + function take(array, n, guard) { + if (!(array && array.length)) { + return []; + } + n = (guard || n === undefined) ? 1 : toInteger(n); + return baseSlice(array, 0, n < 0 ? 0 : n); + } + + /** + * Creates a slice of `array` with `n` elements taken from the end. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to query. + * @param {number} [n=1] The number of elements to take. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Array} Returns the slice of `array`. + * @example + * + * _.takeRight([1, 2, 3]); + * // => [3] + * + * _.takeRight([1, 2, 3], 2); + * // => [2, 3] + * + * _.takeRight([1, 2, 3], 5); + * // => [1, 2, 3] + * + * _.takeRight([1, 2, 3], 0); + * // => [] + */ + function takeRight(array, n, guard) { + var length = array == null ? 0 : array.length; + if (!length) { + return []; + } + n = (guard || n === undefined) ? 1 : toInteger(n); + n = length - n; + return baseSlice(array, n < 0 ? 
0 : n, length); + } + + /** + * Creates a slice of `array` with elements taken from the end. Elements are + * taken until `predicate` returns falsey. The predicate is invoked with + * three arguments: (value, index, array). + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to query. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {Array} Returns the slice of `array`. + * @example + * + * var users = [ + * { 'user': 'barney', 'active': true }, + * { 'user': 'fred', 'active': false }, + * { 'user': 'pebbles', 'active': false } + * ]; + * + * _.takeRightWhile(users, function(o) { return !o.active; }); + * // => objects for ['fred', 'pebbles'] + * + * // The `_.matches` iteratee shorthand. + * _.takeRightWhile(users, { 'user': 'pebbles', 'active': false }); + * // => objects for ['pebbles'] + * + * // The `_.matchesProperty` iteratee shorthand. + * _.takeRightWhile(users, ['active', false]); + * // => objects for ['fred', 'pebbles'] + * + * // The `_.property` iteratee shorthand. + * _.takeRightWhile(users, 'active'); + * // => [] + */ + function takeRightWhile(array, predicate) { + return (array && array.length) + ? baseWhile(array, getIteratee(predicate, 3), false, true) + : []; + } + + /** + * Creates a slice of `array` with elements taken from the beginning. Elements + * are taken until `predicate` returns falsey. The predicate is invoked with + * three arguments: (value, index, array). + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Array + * @param {Array} array The array to query. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {Array} Returns the slice of `array`. 
+ * @example + * + * var users = [ + * { 'user': 'barney', 'active': false }, + * { 'user': 'fred', 'active': false }, + * { 'user': 'pebbles', 'active': true } + * ]; + * + * _.takeWhile(users, function(o) { return !o.active; }); + * // => objects for ['barney', 'fred'] + * + * // The `_.matches` iteratee shorthand. + * _.takeWhile(users, { 'user': 'barney', 'active': false }); + * // => objects for ['barney'] + * + * // The `_.matchesProperty` iteratee shorthand. + * _.takeWhile(users, ['active', false]); + * // => objects for ['barney', 'fred'] + * + * // The `_.property` iteratee shorthand. + * _.takeWhile(users, 'active'); + * // => [] + */ + function takeWhile(array, predicate) { + return (array && array.length) + ? baseWhile(array, getIteratee(predicate, 3)) + : []; + } + + /** + * Creates an array of unique values, in order, from all given arrays using + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * for equality comparisons. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @returns {Array} Returns the new array of combined values. + * @example + * + * _.union([2], [1, 2]); + * // => [2, 1] + */ + var union = baseRest(function(arrays) { + return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true)); + }); + + /** + * This method is like `_.union` except that it accepts `iteratee` which is + * invoked for each element of each `arrays` to generate the criterion by + * which uniqueness is computed. Result values are chosen from the first + * array in which the value occurs. The iteratee is invoked with one argument: + * (value). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {Array} Returns the new array of combined values. 
+ * @example + * + * _.unionBy([2.1], [1.2, 2.3], Math.floor); + * // => [2.1, 1.2] + * + * // The `_.property` iteratee shorthand. + * _.unionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x'); + * // => [{ 'x': 1 }, { 'x': 2 }] + */ + var unionBy = baseRest(function(arrays) { + var iteratee = last(arrays); + if (isArrayLikeObject(iteratee)) { + iteratee = undefined; + } + return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), getIteratee(iteratee, 2)); + }); + + /** + * This method is like `_.union` except that it accepts `comparator` which + * is invoked to compare elements of `arrays`. Result values are chosen from + * the first array in which the value occurs. The comparator is invoked + * with two arguments: (arrVal, othVal). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns the new array of combined values. + * @example + * + * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]; + * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }]; + * + * _.unionWith(objects, others, _.isEqual); + * // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }] + */ + var unionWith = baseRest(function(arrays) { + var comparator = last(arrays); + comparator = typeof comparator == 'function' ? comparator : undefined; + return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), undefined, comparator); + }); + + /** + * Creates a duplicate-free version of an array, using + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * for equality comparisons, in which only the first occurrence of each element + * is kept. The order of result values is determined by the order they occur + * in the array. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to inspect. 
+ * @returns {Array} Returns the new duplicate free array. + * @example + * + * _.uniq([2, 1, 2]); + * // => [2, 1] + */ + function uniq(array) { + return (array && array.length) ? baseUniq(array) : []; + } + + /** + * This method is like `_.uniq` except that it accepts `iteratee` which is + * invoked for each element in `array` to generate the criterion by which + * uniqueness is computed. The order of result values is determined by the + * order they occur in the array. The iteratee is invoked with one argument: + * (value). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {Array} Returns the new duplicate free array. + * @example + * + * _.uniqBy([2.1, 1.2, 2.3], Math.floor); + * // => [2.1, 1.2] + * + * // The `_.property` iteratee shorthand. + * _.uniqBy([{ 'x': 1 }, { 'x': 2 }, { 'x': 1 }], 'x'); + * // => [{ 'x': 1 }, { 'x': 2 }] + */ + function uniqBy(array, iteratee) { + return (array && array.length) ? baseUniq(array, getIteratee(iteratee, 2)) : []; + } + + /** + * This method is like `_.uniq` except that it accepts `comparator` which + * is invoked to compare elements of `array`. The order of result values is + * determined by the order they occur in the array.The comparator is invoked + * with two arguments: (arrVal, othVal). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns the new duplicate free array. + * @example + * + * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 2 }]; + * + * _.uniqWith(objects, _.isEqual); + * // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }] + */ + function uniqWith(array, comparator) { + comparator = typeof comparator == 'function' ? 
comparator : undefined; + return (array && array.length) ? baseUniq(array, undefined, comparator) : []; + } + + /** + * This method is like `_.zip` except that it accepts an array of grouped + * elements and creates an array regrouping the elements to their pre-zip + * configuration. + * + * @static + * @memberOf _ + * @since 1.2.0 + * @category Array + * @param {Array} array The array of grouped elements to process. + * @returns {Array} Returns the new array of regrouped elements. + * @example + * + * var zipped = _.zip(['a', 'b'], [1, 2], [true, false]); + * // => [['a', 1, true], ['b', 2, false]] + * + * _.unzip(zipped); + * // => [['a', 'b'], [1, 2], [true, false]] + */ + function unzip(array) { + if (!(array && array.length)) { + return []; + } + var length = 0; + array = arrayFilter(array, function(group) { + if (isArrayLikeObject(group)) { + length = nativeMax(group.length, length); + return true; + } + }); + return baseTimes(length, function(index) { + return arrayMap(array, baseProperty(index)); + }); + } + + /** + * This method is like `_.unzip` except that it accepts `iteratee` to specify + * how regrouped values should be combined. The iteratee is invoked with the + * elements of each group: (...group). + * + * @static + * @memberOf _ + * @since 3.8.0 + * @category Array + * @param {Array} array The array of grouped elements to process. + * @param {Function} [iteratee=_.identity] The function to combine + * regrouped values. + * @returns {Array} Returns the new array of regrouped elements. 
+ * @example + * + * var zipped = _.zip([1, 2], [10, 20], [100, 200]); + * // => [[1, 10, 100], [2, 20, 200]] + * + * _.unzipWith(zipped, _.add); + * // => [3, 30, 300] + */ + function unzipWith(array, iteratee) { + if (!(array && array.length)) { + return []; + } + var result = unzip(array); + if (iteratee == null) { + return result; + } + return arrayMap(result, function(group) { + return apply(iteratee, undefined, group); + }); + } + + /** + * Creates an array excluding all given values using + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * for equality comparisons. + * + * **Note:** Unlike `_.pull`, this method returns a new array. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {...*} [values] The values to exclude. + * @returns {Array} Returns the new array of filtered values. + * @see _.difference, _.xor + * @example + * + * _.without([2, 1, 2, 3], 1, 2); + * // => [3] + */ + var without = baseRest(function(array, values) { + return isArrayLikeObject(array) + ? baseDifference(array, values) + : []; + }); + + /** + * Creates an array of unique values that is the + * [symmetric difference](https://en.wikipedia.org/wiki/Symmetric_difference) + * of the given arrays. The order of result values is determined by the order + * they occur in the arrays. + * + * @static + * @memberOf _ + * @since 2.4.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @returns {Array} Returns the new array of filtered values. + * @see _.difference, _.without + * @example + * + * _.xor([2, 1], [2, 3]); + * // => [1, 3] + */ + var xor = baseRest(function(arrays) { + return baseXor(arrayFilter(arrays, isArrayLikeObject)); + }); + + /** + * This method is like `_.xor` except that it accepts `iteratee` which is + * invoked for each element of each `arrays` to generate the criterion by + * which by which they're compared. 
The order of result values is determined + * by the order they occur in the arrays. The iteratee is invoked with one + * argument: (value). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {Array} Returns the new array of filtered values. + * @example + * + * _.xorBy([2.1, 1.2], [2.3, 3.4], Math.floor); + * // => [1.2, 3.4] + * + * // The `_.property` iteratee shorthand. + * _.xorBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x'); + * // => [{ 'x': 2 }] + */ + var xorBy = baseRest(function(arrays) { + var iteratee = last(arrays); + if (isArrayLikeObject(iteratee)) { + iteratee = undefined; + } + return baseXor(arrayFilter(arrays, isArrayLikeObject), getIteratee(iteratee, 2)); + }); + + /** + * This method is like `_.xor` except that it accepts `comparator` which is + * invoked to compare elements of `arrays`. The order of result values is + * determined by the order they occur in the arrays. The comparator is invoked + * with two arguments: (arrVal, othVal). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @param {Function} [comparator] The comparator invoked per element. + * @returns {Array} Returns the new array of filtered values. + * @example + * + * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]; + * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }]; + * + * _.xorWith(objects, others, _.isEqual); + * // => [{ 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }] + */ + var xorWith = baseRest(function(arrays) { + var comparator = last(arrays); + comparator = typeof comparator == 'function' ? 
comparator : undefined; + return baseXor(arrayFilter(arrays, isArrayLikeObject), undefined, comparator); + }); + + /** + * Creates an array of grouped elements, the first of which contains the + * first elements of the given arrays, the second of which contains the + * second elements of the given arrays, and so on. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {...Array} [arrays] The arrays to process. + * @returns {Array} Returns the new array of grouped elements. + * @example + * + * _.zip(['a', 'b'], [1, 2], [true, false]); + * // => [['a', 1, true], ['b', 2, false]] + */ + var zip = baseRest(unzip); + + /** + * This method is like `_.fromPairs` except that it accepts two arrays, + * one of property identifiers and one of corresponding values. + * + * @static + * @memberOf _ + * @since 0.4.0 + * @category Array + * @param {Array} [props=[]] The property identifiers. + * @param {Array} [values=[]] The property values. + * @returns {Object} Returns the new object. + * @example + * + * _.zipObject(['a', 'b'], [1, 2]); + * // => { 'a': 1, 'b': 2 } + */ + function zipObject(props, values) { + return baseZipObject(props || [], values || [], assignValue); + } + + /** + * This method is like `_.zipObject` except that it supports property paths. + * + * @static + * @memberOf _ + * @since 4.1.0 + * @category Array + * @param {Array} [props=[]] The property identifiers. + * @param {Array} [values=[]] The property values. + * @returns {Object} Returns the new object. + * @example + * + * _.zipObjectDeep(['a.b[0].c', 'a.b[1].d'], [1, 2]); + * // => { 'a': { 'b': [{ 'c': 1 }, { 'd': 2 }] } } + */ + function zipObjectDeep(props, values) { + return baseZipObject(props || [], values || [], baseSet); + } + + /** + * This method is like `_.zip` except that it accepts `iteratee` to specify + * how grouped values should be combined. The iteratee is invoked with the + * elements of each group: (...group). 
+ * + * @static + * @memberOf _ + * @since 3.8.0 + * @category Array + * @param {...Array} [arrays] The arrays to process. + * @param {Function} [iteratee=_.identity] The function to combine + * grouped values. + * @returns {Array} Returns the new array of grouped elements. + * @example + * + * _.zipWith([1, 2], [10, 20], [100, 200], function(a, b, c) { + * return a + b + c; + * }); + * // => [111, 222] + */ + var zipWith = baseRest(function(arrays) { + var length = arrays.length, + iteratee = length > 1 ? arrays[length - 1] : undefined; + + iteratee = typeof iteratee == 'function' ? (arrays.pop(), iteratee) : undefined; + return unzipWith(arrays, iteratee); + }); + + /*------------------------------------------------------------------------*/ + + /** + * Creates a `lodash` wrapper instance that wraps `value` with explicit method + * chain sequences enabled. The result of such sequences must be unwrapped + * with `_#value`. + * + * @static + * @memberOf _ + * @since 1.3.0 + * @category Seq + * @param {*} value The value to wrap. + * @returns {Object} Returns the new `lodash` wrapper instance. + * @example + * + * var users = [ + * { 'user': 'barney', 'age': 36 }, + * { 'user': 'fred', 'age': 40 }, + * { 'user': 'pebbles', 'age': 1 } + * ]; + * + * var youngest = _ + * .chain(users) + * .sortBy('age') + * .map(function(o) { + * return o.user + ' is ' + o.age; + * }) + * .head() + * .value(); + * // => 'pebbles is 1' + */ + function chain(value) { + var result = lodash(value); + result.__chain__ = true; + return result; + } + + /** + * This method invokes `interceptor` and returns `value`. The interceptor + * is invoked with one argument; (value). The purpose of this method is to + * "tap into" a method chain sequence in order to modify intermediate results. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Seq + * @param {*} value The value to provide to `interceptor`. + * @param {Function} interceptor The function to invoke. 
+ * @returns {*} Returns `value`. + * @example + * + * _([1, 2, 3]) + * .tap(function(array) { + * // Mutate input array. + * array.pop(); + * }) + * .reverse() + * .value(); + * // => [2, 1] + */ + function tap(value, interceptor) { + interceptor(value); + return value; + } + + /** + * This method is like `_.tap` except that it returns the result of `interceptor`. + * The purpose of this method is to "pass thru" values replacing intermediate + * results in a method chain sequence. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Seq + * @param {*} value The value to provide to `interceptor`. + * @param {Function} interceptor The function to invoke. + * @returns {*} Returns the result of `interceptor`. + * @example + * + * _(' abc ') + * .chain() + * .trim() + * .thru(function(value) { + * return [value]; + * }) + * .value(); + * // => ['abc'] + */ + function thru(value, interceptor) { + return interceptor(value); + } + + /** + * This method is the wrapper version of `_.at`. + * + * @name at + * @memberOf _ + * @since 1.0.0 + * @category Seq + * @param {...(string|string[])} [paths] The property paths to pick. + * @returns {Object} Returns the new `lodash` wrapper instance. + * @example + * + * var object = { 'a': [{ 'b': { 'c': 3 } }, 4] }; + * + * _(object).at(['a[0].b.c', 'a[1]']).value(); + * // => [3, 4] + */ + var wrapperAt = flatRest(function(paths) { + var length = paths.length, + start = length ? paths[0] : 0, + value = this.__wrapped__, + interceptor = function(object) { return baseAt(object, paths); }; + + if (length > 1 || this.__actions__.length || + !(value instanceof LazyWrapper) || !isIndex(start)) { + return this.thru(interceptor); + } + value = value.slice(start, +start + (length ? 
1 : 0)); + value.__actions__.push({ + 'func': thru, + 'args': [interceptor], + 'thisArg': undefined + }); + return new LodashWrapper(value, this.__chain__).thru(function(array) { + if (length && !array.length) { + array.push(undefined); + } + return array; + }); + }); + + /** + * Creates a `lodash` wrapper instance with explicit method chain sequences enabled. + * + * @name chain + * @memberOf _ + * @since 0.1.0 + * @category Seq + * @returns {Object} Returns the new `lodash` wrapper instance. + * @example + * + * var users = [ + * { 'user': 'barney', 'age': 36 }, + * { 'user': 'fred', 'age': 40 } + * ]; + * + * // A sequence without explicit chaining. + * _(users).head(); + * // => { 'user': 'barney', 'age': 36 } + * + * // A sequence with explicit chaining. + * _(users) + * .chain() + * .head() + * .pick('user') + * .value(); + * // => { 'user': 'barney' } + */ + function wrapperChain() { + return chain(this); + } + + /** + * Executes the chain sequence and returns the wrapped result. + * + * @name commit + * @memberOf _ + * @since 3.2.0 + * @category Seq + * @returns {Object} Returns the new `lodash` wrapper instance. + * @example + * + * var array = [1, 2]; + * var wrapped = _(array).push(3); + * + * console.log(array); + * // => [1, 2] + * + * wrapped = wrapped.commit(); + * console.log(array); + * // => [1, 2, 3] + * + * wrapped.last(); + * // => 3 + * + * console.log(array); + * // => [1, 2, 3] + */ + function wrapperCommit() { + return new LodashWrapper(this.value(), this.__chain__); + } + + /** + * Gets the next value on a wrapped object following the + * [iterator protocol](https://mdn.io/iteration_protocols#iterator). + * + * @name next + * @memberOf _ + * @since 4.0.0 + * @category Seq + * @returns {Object} Returns the next iterator value. 
+ * @example + * + * var wrapped = _([1, 2]); + * + * wrapped.next(); + * // => { 'done': false, 'value': 1 } + * + * wrapped.next(); + * // => { 'done': false, 'value': 2 } + * + * wrapped.next(); + * // => { 'done': true, 'value': undefined } + */ + function wrapperNext() { + if (this.__values__ === undefined) { + this.__values__ = toArray(this.value()); + } + var done = this.__index__ >= this.__values__.length, + value = done ? undefined : this.__values__[this.__index__++]; + + return { 'done': done, 'value': value }; + } + + /** + * Enables the wrapper to be iterable. + * + * @name Symbol.iterator + * @memberOf _ + * @since 4.0.0 + * @category Seq + * @returns {Object} Returns the wrapper object. + * @example + * + * var wrapped = _([1, 2]); + * + * wrapped[Symbol.iterator]() === wrapped; + * // => true + * + * Array.from(wrapped); + * // => [1, 2] + */ + function wrapperToIterator() { + return this; + } + + /** + * Creates a clone of the chain sequence planting `value` as the wrapped value. + * + * @name plant + * @memberOf _ + * @since 3.2.0 + * @category Seq + * @param {*} value The value to plant. + * @returns {Object} Returns the new `lodash` wrapper instance. + * @example + * + * function square(n) { + * return n * n; + * } + * + * var wrapped = _([1, 2]).map(square); + * var other = wrapped.plant([3, 4]); + * + * other.value(); + * // => [9, 16] + * + * wrapped.value(); + * // => [1, 4] + */ + function wrapperPlant(value) { + var result, + parent = this; + + while (parent instanceof baseLodash) { + var clone = wrapperClone(parent); + clone.__index__ = 0; + clone.__values__ = undefined; + if (result) { + previous.__wrapped__ = clone; + } else { + result = clone; + } + var previous = clone; + parent = parent.__wrapped__; + } + previous.__wrapped__ = value; + return result; + } + + /** + * This method is the wrapper version of `_.reverse`. + * + * **Note:** This method mutates the wrapped array. 
+ * + * @name reverse + * @memberOf _ + * @since 0.1.0 + * @category Seq + * @returns {Object} Returns the new `lodash` wrapper instance. + * @example + * + * var array = [1, 2, 3]; + * + * _(array).reverse().value() + * // => [3, 2, 1] + * + * console.log(array); + * // => [3, 2, 1] + */ + function wrapperReverse() { + var value = this.__wrapped__; + if (value instanceof LazyWrapper) { + var wrapped = value; + if (this.__actions__.length) { + wrapped = new LazyWrapper(this); + } + wrapped = wrapped.reverse(); + wrapped.__actions__.push({ + 'func': thru, + 'args': [reverse], + 'thisArg': undefined + }); + return new LodashWrapper(wrapped, this.__chain__); + } + return this.thru(reverse); + } + + /** + * Executes the chain sequence to resolve the unwrapped value. + * + * @name value + * @memberOf _ + * @since 0.1.0 + * @alias toJSON, valueOf + * @category Seq + * @returns {*} Returns the resolved unwrapped value. + * @example + * + * _([1, 2, 3]).value(); + * // => [1, 2, 3] + */ + function wrapperValue() { + return baseWrapperValue(this.__wrapped__, this.__actions__); + } + + /*------------------------------------------------------------------------*/ + + /** + * Creates an object composed of keys generated from the results of running + * each element of `collection` thru `iteratee`. The corresponding value of + * each key is the number of times the key was returned by `iteratee`. The + * iteratee is invoked with one argument: (value). + * + * @static + * @memberOf _ + * @since 0.5.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The iteratee to transform keys. + * @returns {Object} Returns the composed aggregate object. + * @example + * + * _.countBy([6.1, 4.2, 6.3], Math.floor); + * // => { '4': 1, '6': 2 } + * + * // The `_.property` iteratee shorthand. 
+ * _.countBy(['one', 'two', 'three'], 'length'); + * // => { '3': 2, '5': 1 } + */ + var countBy = createAggregator(function(result, value, key) { + if (hasOwnProperty.call(result, key)) { + ++result[key]; + } else { + baseAssignValue(result, key, 1); + } + }); + + /** + * Checks if `predicate` returns truthy for **all** elements of `collection`. + * Iteration is stopped once `predicate` returns falsey. The predicate is + * invoked with three arguments: (value, index|key, collection). + * + * **Note:** This method returns `true` for + * [empty collections](https://en.wikipedia.org/wiki/Empty_set) because + * [everything is true](https://en.wikipedia.org/wiki/Vacuous_truth) of + * elements of empty collections. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {boolean} Returns `true` if all elements pass the predicate check, + * else `false`. + * @example + * + * _.every([true, 1, null, 'yes'], Boolean); + * // => false + * + * var users = [ + * { 'user': 'barney', 'age': 36, 'active': false }, + * { 'user': 'fred', 'age': 40, 'active': false } + * ]; + * + * // The `_.matches` iteratee shorthand. + * _.every(users, { 'user': 'barney', 'active': false }); + * // => false + * + * // The `_.matchesProperty` iteratee shorthand. + * _.every(users, ['active', false]); + * // => true + * + * // The `_.property` iteratee shorthand. + * _.every(users, 'active'); + * // => false + */ + function every(collection, predicate, guard) { + var func = isArray(collection) ? 
arrayEvery : baseEvery; + if (guard && isIterateeCall(collection, predicate, guard)) { + predicate = undefined; + } + return func(collection, getIteratee(predicate, 3)); + } + + /** + * Iterates over elements of `collection`, returning an array of all elements + * `predicate` returns truthy for. The predicate is invoked with three + * arguments: (value, index|key, collection). + * + * **Note:** Unlike `_.remove`, this method returns a new array. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {Array} Returns the new filtered array. + * @see _.reject + * @example + * + * var users = [ + * { 'user': 'barney', 'age': 36, 'active': true }, + * { 'user': 'fred', 'age': 40, 'active': false } + * ]; + * + * _.filter(users, function(o) { return !o.active; }); + * // => objects for ['fred'] + * + * // The `_.matches` iteratee shorthand. + * _.filter(users, { 'age': 36, 'active': true }); + * // => objects for ['barney'] + * + * // The `_.matchesProperty` iteratee shorthand. + * _.filter(users, ['active', false]); + * // => objects for ['fred'] + * + * // The `_.property` iteratee shorthand. + * _.filter(users, 'active'); + * // => objects for ['barney'] + */ + function filter(collection, predicate) { + var func = isArray(collection) ? arrayFilter : baseFilter; + return func(collection, getIteratee(predicate, 3)); + } + + /** + * Iterates over elements of `collection`, returning the first element + * `predicate` returns truthy for. The predicate is invoked with three + * arguments: (value, index|key, collection). + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to inspect. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @param {number} [fromIndex=0] The index to search from. 
+ * @returns {*} Returns the matched element, else `undefined`. + * @example + * + * var users = [ + * { 'user': 'barney', 'age': 36, 'active': true }, + * { 'user': 'fred', 'age': 40, 'active': false }, + * { 'user': 'pebbles', 'age': 1, 'active': true } + * ]; + * + * _.find(users, function(o) { return o.age < 40; }); + * // => object for 'barney' + * + * // The `_.matches` iteratee shorthand. + * _.find(users, { 'age': 1, 'active': true }); + * // => object for 'pebbles' + * + * // The `_.matchesProperty` iteratee shorthand. + * _.find(users, ['active', false]); + * // => object for 'fred' + * + * // The `_.property` iteratee shorthand. + * _.find(users, 'active'); + * // => object for 'barney' + */ + var find = createFind(findIndex); + + /** + * This method is like `_.find` except that it iterates over elements of + * `collection` from right to left. + * + * @static + * @memberOf _ + * @since 2.0.0 + * @category Collection + * @param {Array|Object} collection The collection to inspect. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @param {number} [fromIndex=collection.length-1] The index to search from. + * @returns {*} Returns the matched element, else `undefined`. + * @example + * + * _.findLast([1, 2, 3, 4], function(n) { + * return n % 2 == 1; + * }); + * // => 3 + */ + var findLast = createFind(findLastIndex); + + /** + * Creates a flattened array of values by running each element in `collection` + * thru `iteratee` and flattening the mapped results. The iteratee is invoked + * with three arguments: (value, index|key, collection). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Array} Returns the new flattened array. 
+ * @example + * + * function duplicate(n) { + * return [n, n]; + * } + * + * _.flatMap([1, 2], duplicate); + * // => [1, 1, 2, 2] + */ + function flatMap(collection, iteratee) { + return baseFlatten(map(collection, iteratee), 1); + } + + /** + * This method is like `_.flatMap` except that it recursively flattens the + * mapped results. + * + * @static + * @memberOf _ + * @since 4.7.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Array} Returns the new flattened array. + * @example + * + * function duplicate(n) { + * return [[[n, n]]]; + * } + * + * _.flatMapDeep([1, 2], duplicate); + * // => [1, 1, 2, 2] + */ + function flatMapDeep(collection, iteratee) { + return baseFlatten(map(collection, iteratee), INFINITY); + } + + /** + * This method is like `_.flatMap` except that it recursively flattens the + * mapped results up to `depth` times. + * + * @static + * @memberOf _ + * @since 4.7.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @param {number} [depth=1] The maximum recursion depth. + * @returns {Array} Returns the new flattened array. + * @example + * + * function duplicate(n) { + * return [[[n, n]]]; + * } + * + * _.flatMapDepth([1, 2], duplicate, 2); + * // => [[1, 1], [2, 2]] + */ + function flatMapDepth(collection, iteratee, depth) { + depth = depth === undefined ? 1 : toInteger(depth); + return baseFlatten(map(collection, iteratee), depth); + } + + /** + * Iterates over elements of `collection` and invokes `iteratee` for each element. + * The iteratee is invoked with three arguments: (value, index|key, collection). + * Iteratee functions may exit iteration early by explicitly returning `false`. 
+ * + * **Note:** As with other "Collections" methods, objects with a "length" + * property are iterated like arrays. To avoid this behavior use `_.forIn` + * or `_.forOwn` for object iteration. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @alias each + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Array|Object} Returns `collection`. + * @see _.forEachRight + * @example + * + * _.forEach([1, 2], function(value) { + * console.log(value); + * }); + * // => Logs `1` then `2`. + * + * _.forEach({ 'a': 1, 'b': 2 }, function(value, key) { + * console.log(key); + * }); + * // => Logs 'a' then 'b' (iteration order is not guaranteed). + */ + function forEach(collection, iteratee) { + var func = isArray(collection) ? arrayEach : baseEach; + return func(collection, getIteratee(iteratee, 3)); + } + + /** + * This method is like `_.forEach` except that it iterates over elements of + * `collection` from right to left. + * + * @static + * @memberOf _ + * @since 2.0.0 + * @alias eachRight + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Array|Object} Returns `collection`. + * @see _.forEach + * @example + * + * _.forEachRight([1, 2], function(value) { + * console.log(value); + * }); + * // => Logs `2` then `1`. + */ + function forEachRight(collection, iteratee) { + var func = isArray(collection) ? arrayEachRight : baseEachRight; + return func(collection, getIteratee(iteratee, 3)); + } + + /** + * Creates an object composed of keys generated from the results of running + * each element of `collection` thru `iteratee`. The order of grouped values + * is determined by the order they occur in `collection`. 
The corresponding + * value of each key is an array of elements responsible for generating the + * key. The iteratee is invoked with one argument: (value). + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The iteratee to transform keys. + * @returns {Object} Returns the composed aggregate object. + * @example + * + * _.groupBy([6.1, 4.2, 6.3], Math.floor); + * // => { '4': [4.2], '6': [6.1, 6.3] } + * + * // The `_.property` iteratee shorthand. + * _.groupBy(['one', 'two', 'three'], 'length'); + * // => { '3': ['one', 'two'], '5': ['three'] } + */ + var groupBy = createAggregator(function(result, value, key) { + if (hasOwnProperty.call(result, key)) { + result[key].push(value); + } else { + baseAssignValue(result, key, [value]); + } + }); + + /** + * Checks if `value` is in `collection`. If `collection` is a string, it's + * checked for a substring of `value`, otherwise + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * is used for equality comparisons. If `fromIndex` is negative, it's used as + * the offset from the end of `collection`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object|string} collection The collection to inspect. + * @param {*} value The value to search for. + * @param {number} [fromIndex=0] The index to search from. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`. + * @returns {boolean} Returns `true` if `value` is found, else `false`. + * @example + * + * _.includes([1, 2, 3], 1); + * // => true + * + * _.includes([1, 2, 3], 1, 2); + * // => false + * + * _.includes({ 'a': 1, 'b': 2 }, 1); + * // => true + * + * _.includes('abcd', 'bc'); + * // => true + */ + function includes(collection, value, fromIndex, guard) { + collection = isArrayLike(collection) ? 
collection : values(collection); + fromIndex = (fromIndex && !guard) ? toInteger(fromIndex) : 0; + + var length = collection.length; + if (fromIndex < 0) { + fromIndex = nativeMax(length + fromIndex, 0); + } + return isString(collection) + ? (fromIndex <= length && collection.indexOf(value, fromIndex) > -1) + : (!!length && baseIndexOf(collection, value, fromIndex) > -1); + } + + /** + * Invokes the method at `path` of each element in `collection`, returning + * an array of the results of each invoked method. Any additional arguments + * are provided to each invoked method. If `path` is a function, it's invoked + * for, and `this` bound to, each element in `collection`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Array|Function|string} path The path of the method to invoke or + * the function invoked per iteration. + * @param {...*} [args] The arguments to invoke each method with. + * @returns {Array} Returns the array of results. + * @example + * + * _.invokeMap([[5, 1, 7], [3, 2, 1]], 'sort'); + * // => [[1, 5, 7], [1, 2, 3]] + * + * _.invokeMap([123, 456], String.prototype.split, ''); + * // => [['1', '2', '3'], ['4', '5', '6']] + */ + var invokeMap = baseRest(function(collection, path, args) { + var index = -1, + isFunc = typeof path == 'function', + result = isArrayLike(collection) ? Array(collection.length) : []; + + baseEach(collection, function(value) { + result[++index] = isFunc ? apply(path, value, args) : baseInvoke(value, path, args); + }); + return result; + }); + + /** + * Creates an object composed of keys generated from the results of running + * each element of `collection` thru `iteratee`. The corresponding value of + * each key is the last element responsible for generating the key. The + * iteratee is invoked with one argument: (value). 
+ * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The iteratee to transform keys. + * @returns {Object} Returns the composed aggregate object. + * @example + * + * var array = [ + * { 'dir': 'left', 'code': 97 }, + * { 'dir': 'right', 'code': 100 } + * ]; + * + * _.keyBy(array, function(o) { + * return String.fromCharCode(o.code); + * }); + * // => { 'a': { 'dir': 'left', 'code': 97 }, 'd': { 'dir': 'right', 'code': 100 } } + * + * _.keyBy(array, 'dir'); + * // => { 'left': { 'dir': 'left', 'code': 97 }, 'right': { 'dir': 'right', 'code': 100 } } + */ + var keyBy = createAggregator(function(result, value, key) { + baseAssignValue(result, key, value); + }); + + /** + * Creates an array of values by running each element in `collection` thru + * `iteratee`. The iteratee is invoked with three arguments: + * (value, index|key, collection). + * + * Many lodash methods are guarded to work as iteratees for methods like + * `_.every`, `_.filter`, `_.map`, `_.mapValues`, `_.reject`, and `_.some`. + * + * The guarded methods are: + * `ary`, `chunk`, `curry`, `curryRight`, `drop`, `dropRight`, `every`, + * `fill`, `invert`, `parseInt`, `random`, `range`, `rangeRight`, `repeat`, + * `sampleSize`, `slice`, `some`, `sortBy`, `split`, `take`, `takeRight`, + * `template`, `trim`, `trimEnd`, `trimStart`, and `words` + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Array} Returns the new mapped array. 
+ * @example + * + * function square(n) { + * return n * n; + * } + * + * _.map([4, 8], square); + * // => [16, 64] + * + * _.map({ 'a': 4, 'b': 8 }, square); + * // => [16, 64] (iteration order is not guaranteed) + * + * var users = [ + * { 'user': 'barney' }, + * { 'user': 'fred' } + * ]; + * + * // The `_.property` iteratee shorthand. + * _.map(users, 'user'); + * // => ['barney', 'fred'] + */ + function map(collection, iteratee) { + var func = isArray(collection) ? arrayMap : baseMap; + return func(collection, getIteratee(iteratee, 3)); + } + + /** + * This method is like `_.sortBy` except that it allows specifying the sort + * orders of the iteratees to sort by. If `orders` is unspecified, all values + * are sorted in ascending order. Otherwise, specify an order of "desc" for + * descending or "asc" for ascending sort order of corresponding values. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Array[]|Function[]|Object[]|string[]} [iteratees=[_.identity]] + * The iteratees to sort by. + * @param {string[]} [orders] The sort orders of `iteratees`. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`. + * @returns {Array} Returns the new sorted array. + * @example + * + * var users = [ + * { 'user': 'fred', 'age': 48 }, + * { 'user': 'barney', 'age': 34 }, + * { 'user': 'fred', 'age': 40 }, + * { 'user': 'barney', 'age': 36 } + * ]; + * + * // Sort by `user` in ascending order and by `age` in descending order. + * _.orderBy(users, ['user', 'age'], ['asc', 'desc']); + * // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 40]] + */ + function orderBy(collection, iteratees, orders, guard) { + if (collection == null) { + return []; + } + if (!isArray(iteratees)) { + iteratees = iteratees == null ? [] : [iteratees]; + } + orders = guard ? 
undefined : orders; + if (!isArray(orders)) { + orders = orders == null ? [] : [orders]; + } + return baseOrderBy(collection, iteratees, orders); + } + + /** + * Creates an array of elements split into two groups, the first of which + * contains elements `predicate` returns truthy for, the second of which + * contains elements `predicate` returns falsey for. The predicate is + * invoked with one argument: (value). + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {Array} Returns the array of grouped elements. + * @example + * + * var users = [ + * { 'user': 'barney', 'age': 36, 'active': false }, + * { 'user': 'fred', 'age': 40, 'active': true }, + * { 'user': 'pebbles', 'age': 1, 'active': false } + * ]; + * + * _.partition(users, function(o) { return o.active; }); + * // => objects for [['fred'], ['barney', 'pebbles']] + * + * // The `_.matches` iteratee shorthand. + * _.partition(users, { 'age': 1, 'active': false }); + * // => objects for [['pebbles'], ['barney', 'fred']] + * + * // The `_.matchesProperty` iteratee shorthand. + * _.partition(users, ['active', false]); + * // => objects for [['barney', 'pebbles'], ['fred']] + * + * // The `_.property` iteratee shorthand. + * _.partition(users, 'active'); + * // => objects for [['fred'], ['barney', 'pebbles']] + */ + var partition = createAggregator(function(result, value, key) { + result[key ? 0 : 1].push(value); + }, function() { return [[], []]; }); + + /** + * Reduces `collection` to a value which is the accumulated result of running + * each element in `collection` thru `iteratee`, where each successive + * invocation is supplied the return value of the previous. If `accumulator` + * is not given, the first element of `collection` is used as the initial + * value. 
The iteratee is invoked with four arguments: + * (accumulator, value, index|key, collection). + * + * Many lodash methods are guarded to work as iteratees for methods like + * `_.reduce`, `_.reduceRight`, and `_.transform`. + * + * The guarded methods are: + * `assign`, `defaults`, `defaultsDeep`, `includes`, `merge`, `orderBy`, + * and `sortBy` + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @param {*} [accumulator] The initial value. + * @returns {*} Returns the accumulated value. + * @see _.reduceRight + * @example + * + * _.reduce([1, 2], function(sum, n) { + * return sum + n; + * }, 0); + * // => 3 + * + * _.reduce({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) { + * (result[value] || (result[value] = [])).push(key); + * return result; + * }, {}); + * // => { '1': ['a', 'c'], '2': ['b'] } (iteration order is not guaranteed) + */ + function reduce(collection, iteratee, accumulator) { + var func = isArray(collection) ? arrayReduce : baseReduce, + initAccum = arguments.length < 3; + + return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEach); + } + + /** + * This method is like `_.reduce` except that it iterates over elements of + * `collection` from right to left. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @param {*} [accumulator] The initial value. + * @returns {*} Returns the accumulated value. 
+ * @see _.reduce + * @example + * + * var array = [[0, 1], [2, 3], [4, 5]]; + * + * _.reduceRight(array, function(flattened, other) { + * return flattened.concat(other); + * }, []); + * // => [4, 5, 2, 3, 0, 1] + */ + function reduceRight(collection, iteratee, accumulator) { + var func = isArray(collection) ? arrayReduceRight : baseReduce, + initAccum = arguments.length < 3; + + return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEachRight); + } + + /** + * The opposite of `_.filter`; this method returns the elements of `collection` + * that `predicate` does **not** return truthy for. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {Array} Returns the new filtered array. + * @see _.filter + * @example + * + * var users = [ + * { 'user': 'barney', 'age': 36, 'active': false }, + * { 'user': 'fred', 'age': 40, 'active': true } + * ]; + * + * _.reject(users, function(o) { return !o.active; }); + * // => objects for ['fred'] + * + * // The `_.matches` iteratee shorthand. + * _.reject(users, { 'age': 40, 'active': true }); + * // => objects for ['barney'] + * + * // The `_.matchesProperty` iteratee shorthand. + * _.reject(users, ['active', false]); + * // => objects for ['fred'] + * + * // The `_.property` iteratee shorthand. + * _.reject(users, 'active'); + * // => objects for ['barney'] + */ + function reject(collection, predicate) { + var func = isArray(collection) ? arrayFilter : baseFilter; + return func(collection, negate(getIteratee(predicate, 3))); + } + + /** + * Gets a random element from `collection`. + * + * @static + * @memberOf _ + * @since 2.0.0 + * @category Collection + * @param {Array|Object} collection The collection to sample. + * @returns {*} Returns the random element. 
+ * @example + * + * _.sample([1, 2, 3, 4]); + * // => 2 + */ + function sample(collection) { + var func = isArray(collection) ? arraySample : baseSample; + return func(collection); + } + + /** + * Gets `n` random elements at unique keys from `collection` up to the + * size of `collection`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Collection + * @param {Array|Object} collection The collection to sample. + * @param {number} [n=1] The number of elements to sample. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Array} Returns the random elements. + * @example + * + * _.sampleSize([1, 2, 3], 2); + * // => [3, 1] + * + * _.sampleSize([1, 2, 3], 4); + * // => [2, 3, 1] + */ + function sampleSize(collection, n, guard) { + if ((guard ? isIterateeCall(collection, n, guard) : n === undefined)) { + n = 1; + } else { + n = toInteger(n); + } + var func = isArray(collection) ? arraySampleSize : baseSampleSize; + return func(collection, n); + } + + /** + * Creates an array of shuffled values, using a version of the + * [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher-Yates_shuffle). + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to shuffle. + * @returns {Array} Returns the new shuffled array. + * @example + * + * _.shuffle([1, 2, 3, 4]); + * // => [4, 1, 3, 2] + */ + function shuffle(collection) { + var func = isArray(collection) ? arrayShuffle : baseShuffle; + return func(collection); + } + + /** + * Gets the size of `collection` by returning its length for array-like + * values or the number of own enumerable string keyed properties for objects. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object|string} collection The collection to inspect. + * @returns {number} Returns the collection size. 
+ * @example + * + * _.size([1, 2, 3]); + * // => 3 + * + * _.size({ 'a': 1, 'b': 2 }); + * // => 2 + * + * _.size('pebbles'); + * // => 7 + */ + function size(collection) { + if (collection == null) { + return 0; + } + if (isArrayLike(collection)) { + return isString(collection) ? stringSize(collection) : collection.length; + } + var tag = getTag(collection); + if (tag == mapTag || tag == setTag) { + return collection.size; + } + return baseKeys(collection).length; + } + + /** + * Checks if `predicate` returns truthy for **any** element of `collection`. + * Iteration is stopped once `predicate` returns truthy. The predicate is + * invoked with three arguments: (value, index|key, collection). + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {boolean} Returns `true` if any element passes the predicate check, + * else `false`. + * @example + * + * _.some([null, 0, 'yes', false], Boolean); + * // => true + * + * var users = [ + * { 'user': 'barney', 'active': true }, + * { 'user': 'fred', 'active': false } + * ]; + * + * // The `_.matches` iteratee shorthand. + * _.some(users, { 'user': 'barney', 'active': false }); + * // => false + * + * // The `_.matchesProperty` iteratee shorthand. + * _.some(users, ['active', false]); + * // => true + * + * // The `_.property` iteratee shorthand. + * _.some(users, 'active'); + * // => true + */ + function some(collection, predicate, guard) { + var func = isArray(collection) ? 
arraySome : baseSome; + if (guard && isIterateeCall(collection, predicate, guard)) { + predicate = undefined; + } + return func(collection, getIteratee(predicate, 3)); + } + + /** + * Creates an array of elements, sorted in ascending order by the results of + * running each element in a collection thru each iteratee. This method + * performs a stable sort, that is, it preserves the original sort order of + * equal elements. The iteratees are invoked with one argument: (value). + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {...(Function|Function[])} [iteratees=[_.identity]] + * The iteratees to sort by. + * @returns {Array} Returns the new sorted array. + * @example + * + * var users = [ + * { 'user': 'fred', 'age': 48 }, + * { 'user': 'barney', 'age': 36 }, + * { 'user': 'fred', 'age': 40 }, + * { 'user': 'barney', 'age': 34 } + * ]; + * + * _.sortBy(users, [function(o) { return o.user; }]); + * // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 40]] + * + * _.sortBy(users, ['user', 'age']); + * // => objects for [['barney', 34], ['barney', 36], ['fred', 40], ['fred', 48]] + */ + var sortBy = baseRest(function(collection, iteratees) { + if (collection == null) { + return []; + } + var length = iteratees.length; + if (length > 1 && isIterateeCall(collection, iteratees[0], iteratees[1])) { + iteratees = []; + } else if (length > 2 && isIterateeCall(iteratees[0], iteratees[1], iteratees[2])) { + iteratees = [iteratees[0]]; + } + return baseOrderBy(collection, baseFlatten(iteratees, 1), []); + }); + + /*------------------------------------------------------------------------*/ + + /** + * Gets the timestamp of the number of milliseconds that have elapsed since + * the Unix epoch (1 January 1970 00:00:00 UTC). + * + * @static + * @memberOf _ + * @since 2.4.0 + * @category Date + * @returns {number} Returns the timestamp. 
+ * @example + * + * _.defer(function(stamp) { + * console.log(_.now() - stamp); + * }, _.now()); + * // => Logs the number of milliseconds it took for the deferred invocation. + */ + var now = ctxNow || function() { + return root.Date.now(); + }; + + /*------------------------------------------------------------------------*/ + + /** + * The opposite of `_.before`; this method creates a function that invokes + * `func` once it's called `n` or more times. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Function + * @param {number} n The number of calls before `func` is invoked. + * @param {Function} func The function to restrict. + * @returns {Function} Returns the new restricted function. + * @example + * + * var saves = ['profile', 'settings']; + * + * var done = _.after(saves.length, function() { + * console.log('done saving!'); + * }); + * + * _.forEach(saves, function(type) { + * asyncSave({ 'type': type, 'complete': done }); + * }); + * // => Logs 'done saving!' after the two async saves have completed. + */ + function after(n, func) { + if (typeof func != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + n = toInteger(n); + return function() { + if (--n < 1) { + return func.apply(this, arguments); + } + }; + } + + /** + * Creates a function that invokes `func`, with up to `n` arguments, + * ignoring any additional arguments. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Function + * @param {Function} func The function to cap arguments for. + * @param {number} [n=func.length] The arity cap. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Function} Returns the new capped function. + * @example + * + * _.map(['6', '8', '10'], _.ary(parseInt, 1)); + * // => [6, 8, 10] + */ + function ary(func, n, guard) { + n = guard ? undefined : n; + n = (func && n == null) ? 
func.length : n; + return createWrap(func, WRAP_ARY_FLAG, undefined, undefined, undefined, undefined, n); + } + + /** + * Creates a function that invokes `func`, with the `this` binding and arguments + * of the created function, while it's called less than `n` times. Subsequent + * calls to the created function return the result of the last `func` invocation. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Function + * @param {number} n The number of calls at which `func` is no longer invoked. + * @param {Function} func The function to restrict. + * @returns {Function} Returns the new restricted function. + * @example + * + * jQuery(element).on('click', _.before(5, addContactToList)); + * // => Allows adding up to 4 contacts to the list. + */ + function before(n, func) { + var result; + if (typeof func != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + n = toInteger(n); + return function() { + if (--n > 0) { + result = func.apply(this, arguments); + } + if (n <= 1) { + func = undefined; + } + return result; + }; + } + + /** + * Creates a function that invokes `func` with the `this` binding of `thisArg` + * and `partials` prepended to the arguments it receives. + * + * The `_.bind.placeholder` value, which defaults to `_` in monolithic builds, + * may be used as a placeholder for partially applied arguments. + * + * **Note:** Unlike native `Function#bind`, this method doesn't set the "length" + * property of bound functions. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Function + * @param {Function} func The function to bind. + * @param {*} thisArg The `this` binding of `func`. + * @param {...*} [partials] The arguments to be partially applied. + * @returns {Function} Returns the new bound function. 
+ * @example + * + * function greet(greeting, punctuation) { + * return greeting + ' ' + this.user + punctuation; + * } + * + * var object = { 'user': 'fred' }; + * + * var bound = _.bind(greet, object, 'hi'); + * bound('!'); + * // => 'hi fred!' + * + * // Bound with placeholders. + * var bound = _.bind(greet, object, _, '!'); + * bound('hi'); + * // => 'hi fred!' + */ + var bind = baseRest(function(func, thisArg, partials) { + var bitmask = WRAP_BIND_FLAG; + if (partials.length) { + var holders = replaceHolders(partials, getHolder(bind)); + bitmask |= WRAP_PARTIAL_FLAG; + } + return createWrap(func, bitmask, thisArg, partials, holders); + }); + + /** + * Creates a function that invokes the method at `object[key]` with `partials` + * prepended to the arguments it receives. + * + * This method differs from `_.bind` by allowing bound functions to reference + * methods that may be redefined or don't yet exist. See + * [Peter Michaux's article](http://peter.michaux.ca/articles/lazy-function-definition-pattern) + * for more details. + * + * The `_.bindKey.placeholder` value, which defaults to `_` in monolithic + * builds, may be used as a placeholder for partially applied arguments. + * + * @static + * @memberOf _ + * @since 0.10.0 + * @category Function + * @param {Object} object The object to invoke the method on. + * @param {string} key The key of the method. + * @param {...*} [partials] The arguments to be partially applied. + * @returns {Function} Returns the new bound function. + * @example + * + * var object = { + * 'user': 'fred', + * 'greet': function(greeting, punctuation) { + * return greeting + ' ' + this.user + punctuation; + * } + * }; + * + * var bound = _.bindKey(object, 'greet', 'hi'); + * bound('!'); + * // => 'hi fred!' + * + * object.greet = function(greeting, punctuation) { + * return greeting + 'ya ' + this.user + punctuation; + * }; + * + * bound('!'); + * // => 'hiya fred!' + * + * // Bound with placeholders. 
+ * var bound = _.bindKey(object, 'greet', _, '!'); + * bound('hi'); + * // => 'hiya fred!' + */ + var bindKey = baseRest(function(object, key, partials) { + var bitmask = WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG; + if (partials.length) { + var holders = replaceHolders(partials, getHolder(bindKey)); + bitmask |= WRAP_PARTIAL_FLAG; + } + return createWrap(key, bitmask, object, partials, holders); + }); + + /** + * Creates a function that accepts arguments of `func` and either invokes + * `func` returning its result, if at least `arity` number of arguments have + * been provided, or returns a function that accepts the remaining `func` + * arguments, and so on. The arity of `func` may be specified if `func.length` + * is not sufficient. + * + * The `_.curry.placeholder` value, which defaults to `_` in monolithic builds, + * may be used as a placeholder for provided arguments. + * + * **Note:** This method doesn't set the "length" property of curried functions. + * + * @static + * @memberOf _ + * @since 2.0.0 + * @category Function + * @param {Function} func The function to curry. + * @param {number} [arity=func.length] The arity of `func`. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Function} Returns the new curried function. + * @example + * + * var abc = function(a, b, c) { + * return [a, b, c]; + * }; + * + * var curried = _.curry(abc); + * + * curried(1)(2)(3); + * // => [1, 2, 3] + * + * curried(1, 2)(3); + * // => [1, 2, 3] + * + * curried(1, 2, 3); + * // => [1, 2, 3] + * + * // Curried with placeholders. + * curried(1)(_, 3)(2); + * // => [1, 2, 3] + */ + function curry(func, arity, guard) { + arity = guard ? 
undefined : arity; + var result = createWrap(func, WRAP_CURRY_FLAG, undefined, undefined, undefined, undefined, undefined, arity); + result.placeholder = curry.placeholder; + return result; + } + + /** + * This method is like `_.curry` except that arguments are applied to `func` + * in the manner of `_.partialRight` instead of `_.partial`. + * + * The `_.curryRight.placeholder` value, which defaults to `_` in monolithic + * builds, may be used as a placeholder for provided arguments. + * + * **Note:** This method doesn't set the "length" property of curried functions. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Function + * @param {Function} func The function to curry. + * @param {number} [arity=func.length] The arity of `func`. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Function} Returns the new curried function. + * @example + * + * var abc = function(a, b, c) { + * return [a, b, c]; + * }; + * + * var curried = _.curryRight(abc); + * + * curried(3)(2)(1); + * // => [1, 2, 3] + * + * curried(2, 3)(1); + * // => [1, 2, 3] + * + * curried(1, 2, 3); + * // => [1, 2, 3] + * + * // Curried with placeholders. + * curried(3)(1, _)(2); + * // => [1, 2, 3] + */ + function curryRight(func, arity, guard) { + arity = guard ? undefined : arity; + var result = createWrap(func, WRAP_CURRY_RIGHT_FLAG, undefined, undefined, undefined, undefined, undefined, arity); + result.placeholder = curryRight.placeholder; + return result; + } + + /** + * Creates a debounced function that delays invoking `func` until after `wait` + * milliseconds have elapsed since the last time the debounced function was + * invoked. The debounced function comes with a `cancel` method to cancel + * delayed `func` invocations and a `flush` method to immediately invoke them. + * Provide `options` to indicate whether `func` should be invoked on the + * leading and/or trailing edge of the `wait` timeout. 
The `func` is invoked
+ * with the last arguments provided to the debounced function. Subsequent
+ * calls to the debounced function return the result of the last `func`
+ * invocation.
+ *
+ * **Note:** If `leading` and `trailing` options are `true`, `func` is
+ * invoked on the trailing edge of the timeout only if the debounced function
+ * is invoked more than once during the `wait` timeout.
+ *
+ * If `wait` is `0` and `leading` is `false`, `func` invocation is deferred
+ * until the next tick, similar to `setTimeout` with a timeout of `0`.
+ *
+ * See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/)
+ * for details on the differences between `_.debounce` and `_.throttle`.
+ *
+ * @static
+ * @memberOf _
+ * @since 0.1.0
+ * @category Function
+ * @param {Function} func The function to debounce.
+ * @param {number} [wait=0] The number of milliseconds to delay.
+ * @param {Object} [options={}] The options object.
+ * @param {boolean} [options.leading=false]
+ *  Specify invoking on the leading edge of the timeout.
+ * @param {number} [options.maxWait]
+ *  The maximum time `func` is allowed to be delayed before it's invoked.
+ * @param {boolean} [options.trailing=true]
+ *  Specify invoking on the trailing edge of the timeout.
+ * @returns {Function} Returns the new debounced function.
+ * @example
+ *
+ * // Avoid costly calculations while the window size is in flux.
+ * jQuery(window).on('resize', _.debounce(calculateLayout, 150));
+ *
+ * // Invoke `sendMail` when clicked, debouncing subsequent calls.
+ * jQuery(element).on('click', _.debounce(sendMail, 300, {
+ *   'leading': true,
+ *   'trailing': false
+ * }));
+ *
+ * // Ensure `batchLog` is invoked once after 1 second of debounced calls.
+ * var debounced = _.debounce(batchLog, 250, { 'maxWait': 1000 });
+ * var source = new EventSource('/stream');
+ * jQuery(source).on('message', debounced);
+ *
+ * // Cancel the trailing debounced invocation.
+ * jQuery(window).on('popstate', debounced.cancel); + */ + function debounce(func, wait, options) { + var lastArgs, + lastThis, + maxWait, + result, + timerId, + lastCallTime, + lastInvokeTime = 0, + leading = false, + maxing = false, + trailing = true; + + if (typeof func != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + wait = toNumber(wait) || 0; + if (isObject(options)) { + leading = !!options.leading; + maxing = 'maxWait' in options; + maxWait = maxing ? nativeMax(toNumber(options.maxWait) || 0, wait) : maxWait; + trailing = 'trailing' in options ? !!options.trailing : trailing; + } + + function invokeFunc(time) { + var args = lastArgs, + thisArg = lastThis; + + lastArgs = lastThis = undefined; + lastInvokeTime = time; + result = func.apply(thisArg, args); + return result; + } + + function leadingEdge(time) { + // Reset any `maxWait` timer. + lastInvokeTime = time; + // Start the timer for the trailing edge. + timerId = setTimeout(timerExpired, wait); + // Invoke the leading edge. + return leading ? invokeFunc(time) : result; + } + + function remainingWait(time) { + var timeSinceLastCall = time - lastCallTime, + timeSinceLastInvoke = time - lastInvokeTime, + timeWaiting = wait - timeSinceLastCall; + + return maxing + ? nativeMin(timeWaiting, maxWait - timeSinceLastInvoke) + : timeWaiting; + } + + function shouldInvoke(time) { + var timeSinceLastCall = time - lastCallTime, + timeSinceLastInvoke = time - lastInvokeTime; + + // Either this is the first call, activity has stopped and we're at the + // trailing edge, the system time has gone backwards and we're treating + // it as the trailing edge, or we've hit the `maxWait` limit. + return (lastCallTime === undefined || (timeSinceLastCall >= wait) || + (timeSinceLastCall < 0) || (maxing && timeSinceLastInvoke >= maxWait)); + } + + function timerExpired() { + var time = now(); + if (shouldInvoke(time)) { + return trailingEdge(time); + } + // Restart the timer. 
+ timerId = setTimeout(timerExpired, remainingWait(time)); + } + + function trailingEdge(time) { + timerId = undefined; + + // Only invoke if we have `lastArgs` which means `func` has been + // debounced at least once. + if (trailing && lastArgs) { + return invokeFunc(time); + } + lastArgs = lastThis = undefined; + return result; + } + + function cancel() { + if (timerId !== undefined) { + clearTimeout(timerId); + } + lastInvokeTime = 0; + lastArgs = lastCallTime = lastThis = timerId = undefined; + } + + function flush() { + return timerId === undefined ? result : trailingEdge(now()); + } + + function debounced() { + var time = now(), + isInvoking = shouldInvoke(time); + + lastArgs = arguments; + lastThis = this; + lastCallTime = time; + + if (isInvoking) { + if (timerId === undefined) { + return leadingEdge(lastCallTime); + } + if (maxing) { + // Handle invocations in a tight loop. + timerId = setTimeout(timerExpired, wait); + return invokeFunc(lastCallTime); + } + } + if (timerId === undefined) { + timerId = setTimeout(timerExpired, wait); + } + return result; + } + debounced.cancel = cancel; + debounced.flush = flush; + return debounced; + } + + /** + * Defers invoking the `func` until the current call stack has cleared. Any + * additional arguments are provided to `func` when it's invoked. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Function + * @param {Function} func The function to defer. + * @param {...*} [args] The arguments to invoke `func` with. + * @returns {number} Returns the timer id. + * @example + * + * _.defer(function(text) { + * console.log(text); + * }, 'deferred'); + * // => Logs 'deferred' after one millisecond. + */ + var defer = baseRest(function(func, args) { + return baseDelay(func, 1, args); + }); + + /** + * Invokes `func` after `wait` milliseconds. Any additional arguments are + * provided to `func` when it's invoked. 
+ * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Function + * @param {Function} func The function to delay. + * @param {number} wait The number of milliseconds to delay invocation. + * @param {...*} [args] The arguments to invoke `func` with. + * @returns {number} Returns the timer id. + * @example + * + * _.delay(function(text) { + * console.log(text); + * }, 1000, 'later'); + * // => Logs 'later' after one second. + */ + var delay = baseRest(function(func, wait, args) { + return baseDelay(func, toNumber(wait) || 0, args); + }); + + /** + * Creates a function that invokes `func` with arguments reversed. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Function + * @param {Function} func The function to flip arguments for. + * @returns {Function} Returns the new flipped function. + * @example + * + * var flipped = _.flip(function() { + * return _.toArray(arguments); + * }); + * + * flipped('a', 'b', 'c', 'd'); + * // => ['d', 'c', 'b', 'a'] + */ + function flip(func) { + return createWrap(func, WRAP_FLIP_FLAG); + } + + /** + * Creates a function that memoizes the result of `func`. If `resolver` is + * provided, it determines the cache key for storing the result based on the + * arguments provided to the memoized function. By default, the first argument + * provided to the memoized function is used as the map cache key. The `func` + * is invoked with the `this` binding of the memoized function. + * + * **Note:** The cache is exposed as the `cache` property on the memoized + * function. Its creation may be customized by replacing the `_.memoize.Cache` + * constructor with one whose instances implement the + * [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object) + * method interface of `clear`, `delete`, `get`, `has`, and `set`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Function + * @param {Function} func The function to have its output memoized. 
+ * @param {Function} [resolver] The function to resolve the cache key. + * @returns {Function} Returns the new memoized function. + * @example + * + * var object = { 'a': 1, 'b': 2 }; + * var other = { 'c': 3, 'd': 4 }; + * + * var values = _.memoize(_.values); + * values(object); + * // => [1, 2] + * + * values(other); + * // => [3, 4] + * + * object.a = 2; + * values(object); + * // => [1, 2] + * + * // Modify the result cache. + * values.cache.set(object, ['a', 'b']); + * values(object); + * // => ['a', 'b'] + * + * // Replace `_.memoize.Cache`. + * _.memoize.Cache = WeakMap; + */ + function memoize(func, resolver) { + if (typeof func != 'function' || (resolver != null && typeof resolver != 'function')) { + throw new TypeError(FUNC_ERROR_TEXT); + } + var memoized = function() { + var args = arguments, + key = resolver ? resolver.apply(this, args) : args[0], + cache = memoized.cache; + + if (cache.has(key)) { + return cache.get(key); + } + var result = func.apply(this, args); + memoized.cache = cache.set(key, result) || cache; + return result; + }; + memoized.cache = new (memoize.Cache || MapCache); + return memoized; + } + + // Expose `MapCache`. + memoize.Cache = MapCache; + + /** + * Creates a function that negates the result of the predicate `func`. The + * `func` predicate is invoked with the `this` binding and arguments of the + * created function. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Function + * @param {Function} predicate The predicate to negate. + * @returns {Function} Returns the new negated function. 
+ * @example + * + * function isEven(n) { + * return n % 2 == 0; + * } + * + * _.filter([1, 2, 3, 4, 5, 6], _.negate(isEven)); + * // => [1, 3, 5] + */ + function negate(predicate) { + if (typeof predicate != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + return function() { + var args = arguments; + switch (args.length) { + case 0: return !predicate.call(this); + case 1: return !predicate.call(this, args[0]); + case 2: return !predicate.call(this, args[0], args[1]); + case 3: return !predicate.call(this, args[0], args[1], args[2]); + } + return !predicate.apply(this, args); + }; + } + + /** + * Creates a function that is restricted to invoking `func` once. Repeat calls + * to the function return the value of the first invocation. The `func` is + * invoked with the `this` binding and arguments of the created function. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Function + * @param {Function} func The function to restrict. + * @returns {Function} Returns the new restricted function. + * @example + * + * var initialize = _.once(createApplication); + * initialize(); + * initialize(); + * // => `createApplication` is invoked once + */ + function once(func) { + return before(2, func); + } + + /** + * Creates a function that invokes `func` with its arguments transformed. + * + * @static + * @since 4.0.0 + * @memberOf _ + * @category Function + * @param {Function} func The function to wrap. + * @param {...(Function|Function[])} [transforms=[_.identity]] + * The argument transforms. + * @returns {Function} Returns the new function. 
+ * @example + * + * function doubled(n) { + * return n * 2; + * } + * + * function square(n) { + * return n * n; + * } + * + * var func = _.overArgs(function(x, y) { + * return [x, y]; + * }, [square, doubled]); + * + * func(9, 3); + * // => [81, 6] + * + * func(10, 5); + * // => [100, 10] + */ + var overArgs = castRest(function(func, transforms) { + transforms = (transforms.length == 1 && isArray(transforms[0])) + ? arrayMap(transforms[0], baseUnary(getIteratee())) + : arrayMap(baseFlatten(transforms, 1), baseUnary(getIteratee())); + + var funcsLength = transforms.length; + return baseRest(function(args) { + var index = -1, + length = nativeMin(args.length, funcsLength); + + while (++index < length) { + args[index] = transforms[index].call(this, args[index]); + } + return apply(func, this, args); + }); + }); + + /** + * Creates a function that invokes `func` with `partials` prepended to the + * arguments it receives. This method is like `_.bind` except it does **not** + * alter the `this` binding. + * + * The `_.partial.placeholder` value, which defaults to `_` in monolithic + * builds, may be used as a placeholder for partially applied arguments. + * + * **Note:** This method doesn't set the "length" property of partially + * applied functions. + * + * @static + * @memberOf _ + * @since 0.2.0 + * @category Function + * @param {Function} func The function to partially apply arguments to. + * @param {...*} [partials] The arguments to be partially applied. + * @returns {Function} Returns the new partially applied function. + * @example + * + * function greet(greeting, name) { + * return greeting + ' ' + name; + * } + * + * var sayHelloTo = _.partial(greet, 'hello'); + * sayHelloTo('fred'); + * // => 'hello fred' + * + * // Partially applied with placeholders. 
+ * var greetFred = _.partial(greet, _, 'fred'); + * greetFred('hi'); + * // => 'hi fred' + */ + var partial = baseRest(function(func, partials) { + var holders = replaceHolders(partials, getHolder(partial)); + return createWrap(func, WRAP_PARTIAL_FLAG, undefined, partials, holders); + }); + + /** + * This method is like `_.partial` except that partially applied arguments + * are appended to the arguments it receives. + * + * The `_.partialRight.placeholder` value, which defaults to `_` in monolithic + * builds, may be used as a placeholder for partially applied arguments. + * + * **Note:** This method doesn't set the "length" property of partially + * applied functions. + * + * @static + * @memberOf _ + * @since 1.0.0 + * @category Function + * @param {Function} func The function to partially apply arguments to. + * @param {...*} [partials] The arguments to be partially applied. + * @returns {Function} Returns the new partially applied function. + * @example + * + * function greet(greeting, name) { + * return greeting + ' ' + name; + * } + * + * var greetFred = _.partialRight(greet, 'fred'); + * greetFred('hi'); + * // => 'hi fred' + * + * // Partially applied with placeholders. + * var sayHelloTo = _.partialRight(greet, 'hello', _); + * sayHelloTo('fred'); + * // => 'hello fred' + */ + var partialRight = baseRest(function(func, partials) { + var holders = replaceHolders(partials, getHolder(partialRight)); + return createWrap(func, WRAP_PARTIAL_RIGHT_FLAG, undefined, partials, holders); + }); + + /** + * Creates a function that invokes `func` with arguments arranged according + * to the specified `indexes` where the argument value at the first index is + * provided as the first argument, the argument value at the second index is + * provided as the second argument, and so on. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Function + * @param {Function} func The function to rearrange arguments for. 
+ * @param {...(number|number[])} indexes The arranged argument indexes. + * @returns {Function} Returns the new function. + * @example + * + * var rearged = _.rearg(function(a, b, c) { + * return [a, b, c]; + * }, [2, 0, 1]); + * + * rearged('b', 'c', 'a') + * // => ['a', 'b', 'c'] + */ + var rearg = flatRest(function(func, indexes) { + return createWrap(func, WRAP_REARG_FLAG, undefined, undefined, undefined, indexes); + }); + + /** + * Creates a function that invokes `func` with the `this` binding of the + * created function and arguments from `start` and beyond provided as + * an array. + * + * **Note:** This method is based on the + * [rest parameter](https://mdn.io/rest_parameters). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Function + * @param {Function} func The function to apply a rest parameter to. + * @param {number} [start=func.length-1] The start position of the rest parameter. + * @returns {Function} Returns the new function. + * @example + * + * var say = _.rest(function(what, names) { + * return what + ' ' + _.initial(names).join(', ') + + * (_.size(names) > 1 ? ', & ' : '') + _.last(names); + * }); + * + * say('hello', 'fred', 'barney', 'pebbles'); + * // => 'hello fred, barney, & pebbles' + */ + function rest(func, start) { + if (typeof func != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + start = start === undefined ? start : toInteger(start); + return baseRest(func, start); + } + + /** + * Creates a function that invokes `func` with the `this` binding of the + * create function and an array of arguments much like + * [`Function#apply`](http://www.ecma-international.org/ecma-262/7.0/#sec-function.prototype.apply). + * + * **Note:** This method is based on the + * [spread operator](https://mdn.io/spread_operator). + * + * @static + * @memberOf _ + * @since 3.2.0 + * @category Function + * @param {Function} func The function to spread arguments over. + * @param {number} [start=0] The start position of the spread. 
+ * @returns {Function} Returns the new function. + * @example + * + * var say = _.spread(function(who, what) { + * return who + ' says ' + what; + * }); + * + * say(['fred', 'hello']); + * // => 'fred says hello' + * + * var numbers = Promise.all([ + * Promise.resolve(40), + * Promise.resolve(36) + * ]); + * + * numbers.then(_.spread(function(x, y) { + * return x + y; + * })); + * // => a Promise of 76 + */ + function spread(func, start) { + if (typeof func != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + start = start == null ? 0 : nativeMax(toInteger(start), 0); + return baseRest(function(args) { + var array = args[start], + otherArgs = castSlice(args, 0, start); + + if (array) { + arrayPush(otherArgs, array); + } + return apply(func, this, otherArgs); + }); + } + + /** + * Creates a throttled function that only invokes `func` at most once per + * every `wait` milliseconds. The throttled function comes with a `cancel` + * method to cancel delayed `func` invocations and a `flush` method to + * immediately invoke them. Provide `options` to indicate whether `func` + * should be invoked on the leading and/or trailing edge of the `wait` + * timeout. The `func` is invoked with the last arguments provided to the + * throttled function. Subsequent calls to the throttled function return the + * result of the last `func` invocation. + * + * **Note:** If `leading` and `trailing` options are `true`, `func` is + * invoked on the trailing edge of the timeout only if the throttled function + * is invoked more than once during the `wait` timeout. + * + * If `wait` is `0` and `leading` is `false`, `func` invocation is deferred + * until to the next tick, similar to `setTimeout` with a timeout of `0`. + * + * See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/) + * for details over the differences between `_.throttle` and `_.debounce`. 
+ * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Function + * @param {Function} func The function to throttle. + * @param {number} [wait=0] The number of milliseconds to throttle invocations to. + * @param {Object} [options={}] The options object. + * @param {boolean} [options.leading=true] + * Specify invoking on the leading edge of the timeout. + * @param {boolean} [options.trailing=true] + * Specify invoking on the trailing edge of the timeout. + * @returns {Function} Returns the new throttled function. + * @example + * + * // Avoid excessively updating the position while scrolling. + * jQuery(window).on('scroll', _.throttle(updatePosition, 100)); + * + * // Invoke `renewToken` when the click event is fired, but not more than once every 5 minutes. + * var throttled = _.throttle(renewToken, 300000, { 'trailing': false }); + * jQuery(element).on('click', throttled); + * + * // Cancel the trailing throttled invocation. + * jQuery(window).on('popstate', throttled.cancel); + */ + function throttle(func, wait, options) { + var leading = true, + trailing = true; + + if (typeof func != 'function') { + throw new TypeError(FUNC_ERROR_TEXT); + } + if (isObject(options)) { + leading = 'leading' in options ? !!options.leading : leading; + trailing = 'trailing' in options ? !!options.trailing : trailing; + } + return debounce(func, wait, { + 'leading': leading, + 'maxWait': wait, + 'trailing': trailing + }); + } + + /** + * Creates a function that accepts up to one argument, ignoring any + * additional arguments. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Function + * @param {Function} func The function to cap arguments for. + * @returns {Function} Returns the new capped function. + * @example + * + * _.map(['6', '8', '10'], _.unary(parseInt)); + * // => [6, 8, 10] + */ + function unary(func) { + return ary(func, 1); + } + + /** + * Creates a function that provides `value` to `wrapper` as its first + * argument. 
Any additional arguments provided to the function are appended + * to those provided to the `wrapper`. The wrapper is invoked with the `this` + * binding of the created function. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Function + * @param {*} value The value to wrap. + * @param {Function} [wrapper=identity] The wrapper function. + * @returns {Function} Returns the new function. + * @example + * + * var p = _.wrap(_.escape, function(func, text) { + * return '

' + func(text) + '

'; + * }); + * + * p('fred, barney, & pebbles'); + * // => '

fred, barney, & pebbles

' + */ + function wrap(value, wrapper) { + return partial(castFunction(wrapper), value); + } + + /*------------------------------------------------------------------------*/ + + /** + * Casts `value` as an array if it's not one. + * + * @static + * @memberOf _ + * @since 4.4.0 + * @category Lang + * @param {*} value The value to inspect. + * @returns {Array} Returns the cast array. + * @example + * + * _.castArray(1); + * // => [1] + * + * _.castArray({ 'a': 1 }); + * // => [{ 'a': 1 }] + * + * _.castArray('abc'); + * // => ['abc'] + * + * _.castArray(null); + * // => [null] + * + * _.castArray(undefined); + * // => [undefined] + * + * _.castArray(); + * // => [] + * + * var array = [1, 2, 3]; + * console.log(_.castArray(array) === array); + * // => true + */ + function castArray() { + if (!arguments.length) { + return []; + } + var value = arguments[0]; + return isArray(value) ? value : [value]; + } + + /** + * Creates a shallow clone of `value`. + * + * **Note:** This method is loosely based on the + * [structured clone algorithm](https://mdn.io/Structured_clone_algorithm) + * and supports cloning arrays, array buffers, booleans, date objects, maps, + * numbers, `Object` objects, regexes, sets, strings, symbols, and typed + * arrays. The own enumerable properties of `arguments` objects are cloned + * as plain objects. An empty object is returned for uncloneable values such + * as error objects, functions, DOM nodes, and WeakMaps. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to clone. + * @returns {*} Returns the cloned value. + * @see _.cloneDeep + * @example + * + * var objects = [{ 'a': 1 }, { 'b': 2 }]; + * + * var shallow = _.clone(objects); + * console.log(shallow[0] === objects[0]); + * // => true + */ + function clone(value) { + return baseClone(value, CLONE_SYMBOLS_FLAG); + } + + /** + * This method is like `_.clone` except that it accepts `customizer` which + * is invoked to produce the cloned value. 
If `customizer` returns `undefined`, + * cloning is handled by the method instead. The `customizer` is invoked with + * up to four arguments; (value [, index|key, object, stack]). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to clone. + * @param {Function} [customizer] The function to customize cloning. + * @returns {*} Returns the cloned value. + * @see _.cloneDeepWith + * @example + * + * function customizer(value) { + * if (_.isElement(value)) { + * return value.cloneNode(false); + * } + * } + * + * var el = _.cloneWith(document.body, customizer); + * + * console.log(el === document.body); + * // => false + * console.log(el.nodeName); + * // => 'BODY' + * console.log(el.childNodes.length); + * // => 0 + */ + function cloneWith(value, customizer) { + customizer = typeof customizer == 'function' ? customizer : undefined; + return baseClone(value, CLONE_SYMBOLS_FLAG, customizer); + } + + /** + * This method is like `_.clone` except that it recursively clones `value`. + * + * @static + * @memberOf _ + * @since 1.0.0 + * @category Lang + * @param {*} value The value to recursively clone. + * @returns {*} Returns the deep cloned value. + * @see _.clone + * @example + * + * var objects = [{ 'a': 1 }, { 'b': 2 }]; + * + * var deep = _.cloneDeep(objects); + * console.log(deep[0] === objects[0]); + * // => false + */ + function cloneDeep(value) { + return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG); + } + + /** + * This method is like `_.cloneWith` except that it recursively clones `value`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to recursively clone. + * @param {Function} [customizer] The function to customize cloning. + * @returns {*} Returns the deep cloned value. 
+ * @see _.cloneWith + * @example + * + * function customizer(value) { + * if (_.isElement(value)) { + * return value.cloneNode(true); + * } + * } + * + * var el = _.cloneDeepWith(document.body, customizer); + * + * console.log(el === document.body); + * // => false + * console.log(el.nodeName); + * // => 'BODY' + * console.log(el.childNodes.length); + * // => 20 + */ + function cloneDeepWith(value, customizer) { + customizer = typeof customizer == 'function' ? customizer : undefined; + return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG, customizer); + } + + /** + * Checks if `object` conforms to `source` by invoking the predicate + * properties of `source` with the corresponding property values of `object`. + * + * **Note:** This method is equivalent to `_.conforms` when `source` is + * partially applied. + * + * @static + * @memberOf _ + * @since 4.14.0 + * @category Lang + * @param {Object} object The object to inspect. + * @param {Object} source The object of property predicates to conform to. + * @returns {boolean} Returns `true` if `object` conforms, else `false`. + * @example + * + * var object = { 'a': 1, 'b': 2 }; + * + * _.conformsTo(object, { 'b': function(n) { return n > 1; } }); + * // => true + * + * _.conformsTo(object, { 'b': function(n) { return n > 2; } }); + * // => false + */ + function conformsTo(object, source) { + return source == null || baseConformsTo(object, source, keys(source)); + } + + /** + * Performs a + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * comparison between two values to determine if they are equivalent. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {boolean} Returns `true` if the values are equivalent, else `false`. 
+ * @example + * + * var object = { 'a': 1 }; + * var other = { 'a': 1 }; + * + * _.eq(object, object); + * // => true + * + * _.eq(object, other); + * // => false + * + * _.eq('a', 'a'); + * // => true + * + * _.eq('a', Object('a')); + * // => false + * + * _.eq(NaN, NaN); + * // => true + */ + function eq(value, other) { + return value === other || (value !== value && other !== other); + } + + /** + * Checks if `value` is greater than `other`. + * + * @static + * @memberOf _ + * @since 3.9.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {boolean} Returns `true` if `value` is greater than `other`, + * else `false`. + * @see _.lt + * @example + * + * _.gt(3, 1); + * // => true + * + * _.gt(3, 3); + * // => false + * + * _.gt(1, 3); + * // => false + */ + var gt = createRelationalOperation(baseGt); + + /** + * Checks if `value` is greater than or equal to `other`. + * + * @static + * @memberOf _ + * @since 3.9.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {boolean} Returns `true` if `value` is greater than or equal to + * `other`, else `false`. + * @see _.lte + * @example + * + * _.gte(3, 1); + * // => true + * + * _.gte(3, 3); + * // => true + * + * _.gte(1, 3); + * // => false + */ + var gte = createRelationalOperation(function(value, other) { + return value >= other; + }); + + /** + * Checks if `value` is likely an `arguments` object. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an `arguments` object, + * else `false`. + * @example + * + * _.isArguments(function() { return arguments; }()); + * // => true + * + * _.isArguments([1, 2, 3]); + * // => false + */ + var isArguments = baseIsArguments(function() { return arguments; }()) ? 
baseIsArguments : function(value) { + return isObjectLike(value) && hasOwnProperty.call(value, 'callee') && + !propertyIsEnumerable.call(value, 'callee'); + }; + + /** + * Checks if `value` is classified as an `Array` object. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an array, else `false`. + * @example + * + * _.isArray([1, 2, 3]); + * // => true + * + * _.isArray(document.body.children); + * // => false + * + * _.isArray('abc'); + * // => false + * + * _.isArray(_.noop); + * // => false + */ + var isArray = Array.isArray; + + /** + * Checks if `value` is classified as an `ArrayBuffer` object. + * + * @static + * @memberOf _ + * @since 4.3.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an array buffer, else `false`. + * @example + * + * _.isArrayBuffer(new ArrayBuffer(2)); + * // => true + * + * _.isArrayBuffer(new Array(2)); + * // => false + */ + var isArrayBuffer = nodeIsArrayBuffer ? baseUnary(nodeIsArrayBuffer) : baseIsArrayBuffer; + + /** + * Checks if `value` is array-like. A value is considered array-like if it's + * not a function and has a `value.length` that's an integer greater than or + * equal to `0` and less than or equal to `Number.MAX_SAFE_INTEGER`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is array-like, else `false`. + * @example + * + * _.isArrayLike([1, 2, 3]); + * // => true + * + * _.isArrayLike(document.body.children); + * // => true + * + * _.isArrayLike('abc'); + * // => true + * + * _.isArrayLike(_.noop); + * // => false + */ + function isArrayLike(value) { + return value != null && isLength(value.length) && !isFunction(value); + } + + /** + * This method is like `_.isArrayLike` except that it also checks if `value` + * is an object. 
+ * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an array-like object, + * else `false`. + * @example + * + * _.isArrayLikeObject([1, 2, 3]); + * // => true + * + * _.isArrayLikeObject(document.body.children); + * // => true + * + * _.isArrayLikeObject('abc'); + * // => false + * + * _.isArrayLikeObject(_.noop); + * // => false + */ + function isArrayLikeObject(value) { + return isObjectLike(value) && isArrayLike(value); + } + + /** + * Checks if `value` is classified as a boolean primitive or object. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a boolean, else `false`. + * @example + * + * _.isBoolean(false); + * // => true + * + * _.isBoolean(null); + * // => false + */ + function isBoolean(value) { + return value === true || value === false || + (isObjectLike(value) && baseGetTag(value) == boolTag); + } + + /** + * Checks if `value` is a buffer. + * + * @static + * @memberOf _ + * @since 4.3.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a buffer, else `false`. + * @example + * + * _.isBuffer(new Buffer(2)); + * // => true + * + * _.isBuffer(new Uint8Array(2)); + * // => false + */ + var isBuffer = nativeIsBuffer || stubFalse; + + /** + * Checks if `value` is classified as a `Date` object. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a date object, else `false`. + * @example + * + * _.isDate(new Date); + * // => true + * + * _.isDate('Mon April 23 2012'); + * // => false + */ + var isDate = nodeIsDate ? baseUnary(nodeIsDate) : baseIsDate; + + /** + * Checks if `value` is likely a DOM element. 
+ * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a DOM element, else `false`. + * @example + * + * _.isElement(document.body); + * // => true + * + * _.isElement(''); + * // => false + */ + function isElement(value) { + return isObjectLike(value) && value.nodeType === 1 && !isPlainObject(value); + } + + /** + * Checks if `value` is an empty object, collection, map, or set. + * + * Objects are considered empty if they have no own enumerable string keyed + * properties. + * + * Array-like values such as `arguments` objects, arrays, buffers, strings, or + * jQuery-like collections are considered empty if they have a `length` of `0`. + * Similarly, maps and sets are considered empty if they have a `size` of `0`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is empty, else `false`. + * @example + * + * _.isEmpty(null); + * // => true + * + * _.isEmpty(true); + * // => true + * + * _.isEmpty(1); + * // => true + * + * _.isEmpty([1, 2, 3]); + * // => false + * + * _.isEmpty({ 'a': 1 }); + * // => false + */ + function isEmpty(value) { + if (value == null) { + return true; + } + if (isArrayLike(value) && + (isArray(value) || typeof value == 'string' || typeof value.splice == 'function' || + isBuffer(value) || isTypedArray(value) || isArguments(value))) { + return !value.length; + } + var tag = getTag(value); + if (tag == mapTag || tag == setTag) { + return !value.size; + } + if (isPrototype(value)) { + return !baseKeys(value).length; + } + for (var key in value) { + if (hasOwnProperty.call(value, key)) { + return false; + } + } + return true; + } + + /** + * Performs a deep comparison between two values to determine if they are + * equivalent. 
+ * + * **Note:** This method supports comparing arrays, array buffers, booleans, + * date objects, error objects, maps, numbers, `Object` objects, regexes, + * sets, strings, symbols, and typed arrays. `Object` objects are compared + * by their own, not inherited, enumerable properties. Functions and DOM + * nodes are compared by strict equality, i.e. `===`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {boolean} Returns `true` if the values are equivalent, else `false`. + * @example + * + * var object = { 'a': 1 }; + * var other = { 'a': 1 }; + * + * _.isEqual(object, other); + * // => true + * + * object === other; + * // => false + */ + function isEqual(value, other) { + return baseIsEqual(value, other); + } + + /** + * This method is like `_.isEqual` except that it accepts `customizer` which + * is invoked to compare values. If `customizer` returns `undefined`, comparisons + * are handled by the method instead. The `customizer` is invoked with up to + * six arguments: (objValue, othValue [, index|key, object, other, stack]). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @param {Function} [customizer] The function to customize comparisons. + * @returns {boolean} Returns `true` if the values are equivalent, else `false`. + * @example + * + * function isGreeting(value) { + * return /^h(?:i|ello)$/.test(value); + * } + * + * function customizer(objValue, othValue) { + * if (isGreeting(objValue) && isGreeting(othValue)) { + * return true; + * } + * } + * + * var array = ['hello', 'goodbye']; + * var other = ['hi', 'goodbye']; + * + * _.isEqualWith(array, other, customizer); + * // => true + */ + function isEqualWith(value, other, customizer) { + customizer = typeof customizer == 'function' ? 
customizer : undefined; + var result = customizer ? customizer(value, other) : undefined; + return result === undefined ? baseIsEqual(value, other, undefined, customizer) : !!result; + } + + /** + * Checks if `value` is an `Error`, `EvalError`, `RangeError`, `ReferenceError`, + * `SyntaxError`, `TypeError`, or `URIError` object. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an error object, else `false`. + * @example + * + * _.isError(new Error); + * // => true + * + * _.isError(Error); + * // => false + */ + function isError(value) { + if (!isObjectLike(value)) { + return false; + } + var tag = baseGetTag(value); + return tag == errorTag || tag == domExcTag || + (typeof value.message == 'string' && typeof value.name == 'string' && !isPlainObject(value)); + } + + /** + * Checks if `value` is a finite primitive number. + * + * **Note:** This method is based on + * [`Number.isFinite`](https://mdn.io/Number/isFinite). + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a finite number, else `false`. + * @example + * + * _.isFinite(3); + * // => true + * + * _.isFinite(Number.MIN_VALUE); + * // => true + * + * _.isFinite(Infinity); + * // => false + * + * _.isFinite('3'); + * // => false + */ + function isFinite(value) { + return typeof value == 'number' && nativeIsFinite(value); + } + + /** + * Checks if `value` is classified as a `Function` object. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a function, else `false`. 
+ * @example + * + * _.isFunction(_); + * // => true + * + * _.isFunction(/abc/); + * // => false + */ + function isFunction(value) { + if (!isObject(value)) { + return false; + } + // The use of `Object#toString` avoids issues with the `typeof` operator + // in Safari 9 which returns 'object' for typed arrays and other constructors. + var tag = baseGetTag(value); + return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag; + } + + /** + * Checks if `value` is an integer. + * + * **Note:** This method is based on + * [`Number.isInteger`](https://mdn.io/Number/isInteger). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an integer, else `false`. + * @example + * + * _.isInteger(3); + * // => true + * + * _.isInteger(Number.MIN_VALUE); + * // => false + * + * _.isInteger(Infinity); + * // => false + * + * _.isInteger('3'); + * // => false + */ + function isInteger(value) { + return typeof value == 'number' && value == toInteger(value); + } + + /** + * Checks if `value` is a valid array-like length. + * + * **Note:** This method is loosely based on + * [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a valid length, else `false`. + * @example + * + * _.isLength(3); + * // => true + * + * _.isLength(Number.MIN_VALUE); + * // => false + * + * _.isLength(Infinity); + * // => false + * + * _.isLength('3'); + * // => false + */ + function isLength(value) { + return typeof value == 'number' && + value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER; + } + + /** + * Checks if `value` is the + * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types) + * of `Object`. (e.g. 
arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`) + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an object, else `false`. + * @example + * + * _.isObject({}); + * // => true + * + * _.isObject([1, 2, 3]); + * // => true + * + * _.isObject(_.noop); + * // => true + * + * _.isObject(null); + * // => false + */ + function isObject(value) { + var type = typeof value; + return value != null && (type == 'object' || type == 'function'); + } + + /** + * Checks if `value` is object-like. A value is object-like if it's not `null` + * and has a `typeof` result of "object". + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is object-like, else `false`. + * @example + * + * _.isObjectLike({}); + * // => true + * + * _.isObjectLike([1, 2, 3]); + * // => true + * + * _.isObjectLike(_.noop); + * // => false + * + * _.isObjectLike(null); + * // => false + */ + function isObjectLike(value) { + return value != null && typeof value == 'object'; + } + + /** + * Checks if `value` is classified as a `Map` object. + * + * @static + * @memberOf _ + * @since 4.3.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a map, else `false`. + * @example + * + * _.isMap(new Map); + * // => true + * + * _.isMap(new WeakMap); + * // => false + */ + var isMap = nodeIsMap ? baseUnary(nodeIsMap) : baseIsMap; + + /** + * Performs a partial deep comparison between `object` and `source` to + * determine if `object` contains equivalent property values. + * + * **Note:** This method is equivalent to `_.matches` when `source` is + * partially applied. + * + * Partial comparisons will match empty array and empty object `source` + * values against any array or object value, respectively. 
See `_.isEqual` + * for a list of supported value comparisons. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Lang + * @param {Object} object The object to inspect. + * @param {Object} source The object of property values to match. + * @returns {boolean} Returns `true` if `object` is a match, else `false`. + * @example + * + * var object = { 'a': 1, 'b': 2 }; + * + * _.isMatch(object, { 'b': 2 }); + * // => true + * + * _.isMatch(object, { 'b': 1 }); + * // => false + */ + function isMatch(object, source) { + return object === source || baseIsMatch(object, source, getMatchData(source)); + } + + /** + * This method is like `_.isMatch` except that it accepts `customizer` which + * is invoked to compare values. If `customizer` returns `undefined`, comparisons + * are handled by the method instead. The `customizer` is invoked with five + * arguments: (objValue, srcValue, index|key, object, source). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {Object} object The object to inspect. + * @param {Object} source The object of property values to match. + * @param {Function} [customizer] The function to customize comparisons. + * @returns {boolean} Returns `true` if `object` is a match, else `false`. + * @example + * + * function isGreeting(value) { + * return /^h(?:i|ello)$/.test(value); + * } + * + * function customizer(objValue, srcValue) { + * if (isGreeting(objValue) && isGreeting(srcValue)) { + * return true; + * } + * } + * + * var object = { 'greeting': 'hello' }; + * var source = { 'greeting': 'hi' }; + * + * _.isMatchWith(object, source, customizer); + * // => true + */ + function isMatchWith(object, source, customizer) { + customizer = typeof customizer == 'function' ? customizer : undefined; + return baseIsMatch(object, source, getMatchData(source), customizer); + } + + /** + * Checks if `value` is `NaN`. 
+ * + * **Note:** This method is based on + * [`Number.isNaN`](https://mdn.io/Number/isNaN) and is not the same as + * global [`isNaN`](https://mdn.io/isNaN) which returns `true` for + * `undefined` and other non-number values. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is `NaN`, else `false`. + * @example + * + * _.isNaN(NaN); + * // => true + * + * _.isNaN(new Number(NaN)); + * // => true + * + * isNaN(undefined); + * // => true + * + * _.isNaN(undefined); + * // => false + */ + function isNaN(value) { + // An `NaN` primitive is the only value that is not equal to itself. + // Perform the `toStringTag` check first to avoid errors with some + // ActiveX objects in IE. + return isNumber(value) && value != +value; + } + + /** + * Checks if `value` is a pristine native function. + * + * **Note:** This method can't reliably detect native functions in the presence + * of the core-js package because core-js circumvents this kind of detection. + * Despite multiple requests, the core-js maintainer has made it clear: any + * attempt to fix the detection will be obstructed. As a result, we're left + * with little choice but to throw an error. Unfortunately, this also affects + * packages, like [babel-polyfill](https://www.npmjs.com/package/babel-polyfill), + * which rely on core-js. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a native function, + * else `false`. + * @example + * + * _.isNative(Array.prototype.push); + * // => true + * + * _.isNative(_); + * // => false + */ + function isNative(value) { + if (isMaskable(value)) { + throw new Error(CORE_ERROR_TEXT); + } + return baseIsNative(value); + } + + /** + * Checks if `value` is `null`. 
+ * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is `null`, else `false`. + * @example + * + * _.isNull(null); + * // => true + * + * _.isNull(void 0); + * // => false + */ + function isNull(value) { + return value === null; + } + + /** + * Checks if `value` is `null` or `undefined`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is nullish, else `false`. + * @example + * + * _.isNil(null); + * // => true + * + * _.isNil(void 0); + * // => true + * + * _.isNil(NaN); + * // => false + */ + function isNil(value) { + return value == null; + } + + /** + * Checks if `value` is classified as a `Number` primitive or object. + * + * **Note:** To exclude `Infinity`, `-Infinity`, and `NaN`, which are + * classified as numbers, use the `_.isFinite` method. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a number, else `false`. + * @example + * + * _.isNumber(3); + * // => true + * + * _.isNumber(Number.MIN_VALUE); + * // => true + * + * _.isNumber(Infinity); + * // => true + * + * _.isNumber('3'); + * // => false + */ + function isNumber(value) { + return typeof value == 'number' || + (isObjectLike(value) && baseGetTag(value) == numberTag); + } + + /** + * Checks if `value` is a plain object, that is, an object created by the + * `Object` constructor or one with a `[[Prototype]]` of `null`. + * + * @static + * @memberOf _ + * @since 0.8.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a plain object, else `false`. 
+ * @example + * + * function Foo() { + * this.a = 1; + * } + * + * _.isPlainObject(new Foo); + * // => false + * + * _.isPlainObject([1, 2, 3]); + * // => false + * + * _.isPlainObject({ 'x': 0, 'y': 0 }); + * // => true + * + * _.isPlainObject(Object.create(null)); + * // => true + */ + function isPlainObject(value) { + if (!isObjectLike(value) || baseGetTag(value) != objectTag) { + return false; + } + var proto = getPrototype(value); + if (proto === null) { + return true; + } + var Ctor = hasOwnProperty.call(proto, 'constructor') && proto.constructor; + return typeof Ctor == 'function' && Ctor instanceof Ctor && + funcToString.call(Ctor) == objectCtorString; + } + + /** + * Checks if `value` is classified as a `RegExp` object. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a regexp, else `false`. + * @example + * + * _.isRegExp(/abc/); + * // => true + * + * _.isRegExp('/abc/'); + * // => false + */ + var isRegExp = nodeIsRegExp ? baseUnary(nodeIsRegExp) : baseIsRegExp; + + /** + * Checks if `value` is a safe integer. An integer is safe if it's an IEEE-754 + * double precision number which isn't the result of a rounded unsafe integer. + * + * **Note:** This method is based on + * [`Number.isSafeInteger`](https://mdn.io/Number/isSafeInteger). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a safe integer, else `false`. + * @example + * + * _.isSafeInteger(3); + * // => true + * + * _.isSafeInteger(Number.MIN_VALUE); + * // => false + * + * _.isSafeInteger(Infinity); + * // => false + * + * _.isSafeInteger('3'); + * // => false + */ + function isSafeInteger(value) { + return isInteger(value) && value >= -MAX_SAFE_INTEGER && value <= MAX_SAFE_INTEGER; + } + + /** + * Checks if `value` is classified as a `Set` object. 
+ * + * @static + * @memberOf _ + * @since 4.3.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a set, else `false`. + * @example + * + * _.isSet(new Set); + * // => true + * + * _.isSet(new WeakSet); + * // => false + */ + var isSet = nodeIsSet ? baseUnary(nodeIsSet) : baseIsSet; + + /** + * Checks if `value` is classified as a `String` primitive or object. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a string, else `false`. + * @example + * + * _.isString('abc'); + * // => true + * + * _.isString(1); + * // => false + */ + function isString(value) { + return typeof value == 'string' || + (!isArray(value) && isObjectLike(value) && baseGetTag(value) == stringTag); + } + + /** + * Checks if `value` is classified as a `Symbol` primitive or object. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a symbol, else `false`. + * @example + * + * _.isSymbol(Symbol.iterator); + * // => true + * + * _.isSymbol('abc'); + * // => false + */ + function isSymbol(value) { + return typeof value == 'symbol' || + (isObjectLike(value) && baseGetTag(value) == symbolTag); + } + + /** + * Checks if `value` is classified as a typed array. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a typed array, else `false`. + * @example + * + * _.isTypedArray(new Uint8Array); + * // => true + * + * _.isTypedArray([]); + * // => false + */ + var isTypedArray = nodeIsTypedArray ? baseUnary(nodeIsTypedArray) : baseIsTypedArray; + + /** + * Checks if `value` is `undefined`. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Lang + * @param {*} value The value to check. 
+ * @returns {boolean} Returns `true` if `value` is `undefined`, else `false`. + * @example + * + * _.isUndefined(void 0); + * // => true + * + * _.isUndefined(null); + * // => false + */ + function isUndefined(value) { + return value === undefined; + } + + /** + * Checks if `value` is classified as a `WeakMap` object. + * + * @static + * @memberOf _ + * @since 4.3.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a weak map, else `false`. + * @example + * + * _.isWeakMap(new WeakMap); + * // => true + * + * _.isWeakMap(new Map); + * // => false + */ + function isWeakMap(value) { + return isObjectLike(value) && getTag(value) == weakMapTag; + } + + /** + * Checks if `value` is classified as a `WeakSet` object. + * + * @static + * @memberOf _ + * @since 4.3.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a weak set, else `false`. + * @example + * + * _.isWeakSet(new WeakSet); + * // => true + * + * _.isWeakSet(new Set); + * // => false + */ + function isWeakSet(value) { + return isObjectLike(value) && baseGetTag(value) == weakSetTag; + } + + /** + * Checks if `value` is less than `other`. + * + * @static + * @memberOf _ + * @since 3.9.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {boolean} Returns `true` if `value` is less than `other`, + * else `false`. + * @see _.gt + * @example + * + * _.lt(1, 3); + * // => true + * + * _.lt(3, 3); + * // => false + * + * _.lt(3, 1); + * // => false + */ + var lt = createRelationalOperation(baseLt); + + /** + * Checks if `value` is less than or equal to `other`. + * + * @static + * @memberOf _ + * @since 3.9.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {boolean} Returns `true` if `value` is less than or equal to + * `other`, else `false`. 
+ * @see _.gte + * @example + * + * _.lte(1, 3); + * // => true + * + * _.lte(3, 3); + * // => true + * + * _.lte(3, 1); + * // => false + */ + var lte = createRelationalOperation(function(value, other) { + return value <= other; + }); + + /** + * Converts `value` to an array. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Lang + * @param {*} value The value to convert. + * @returns {Array} Returns the converted array. + * @example + * + * _.toArray({ 'a': 1, 'b': 2 }); + * // => [1, 2] + * + * _.toArray('abc'); + * // => ['a', 'b', 'c'] + * + * _.toArray(1); + * // => [] + * + * _.toArray(null); + * // => [] + */ + function toArray(value) { + if (!value) { + return []; + } + if (isArrayLike(value)) { + return isString(value) ? stringToArray(value) : copyArray(value); + } + if (symIterator && value[symIterator]) { + return iteratorToArray(value[symIterator]()); + } + var tag = getTag(value), + func = tag == mapTag ? mapToArray : (tag == setTag ? setToArray : values); + + return func(value); + } + + /** + * Converts `value` to a finite number. + * + * @static + * @memberOf _ + * @since 4.12.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {number} Returns the converted number. + * @example + * + * _.toFinite(3.2); + * // => 3.2 + * + * _.toFinite(Number.MIN_VALUE); + * // => 5e-324 + * + * _.toFinite(Infinity); + * // => 1.7976931348623157e+308 + * + * _.toFinite('3.2'); + * // => 3.2 + */ + function toFinite(value) { + if (!value) { + return value === 0 ? value : 0; + } + value = toNumber(value); + if (value === INFINITY || value === -INFINITY) { + var sign = (value < 0 ? -1 : 1); + return sign * MAX_INTEGER; + } + return value === value ? value : 0; + } + + /** + * Converts `value` to an integer. + * + * **Note:** This method is loosely based on + * [`ToInteger`](http://www.ecma-international.org/ecma-262/7.0/#sec-tointeger). 
+ * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {number} Returns the converted integer. + * @example + * + * _.toInteger(3.2); + * // => 3 + * + * _.toInteger(Number.MIN_VALUE); + * // => 0 + * + * _.toInteger(Infinity); + * // => 1.7976931348623157e+308 + * + * _.toInteger('3.2'); + * // => 3 + */ + function toInteger(value) { + var result = toFinite(value), + remainder = result % 1; + + return result === result ? (remainder ? result - remainder : result) : 0; + } + + /** + * Converts `value` to an integer suitable for use as the length of an + * array-like object. + * + * **Note:** This method is based on + * [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {number} Returns the converted integer. + * @example + * + * _.toLength(3.2); + * // => 3 + * + * _.toLength(Number.MIN_VALUE); + * // => 0 + * + * _.toLength(Infinity); + * // => 4294967295 + * + * _.toLength('3.2'); + * // => 3 + */ + function toLength(value) { + return value ? baseClamp(toInteger(value), 0, MAX_ARRAY_LENGTH) : 0; + } + + /** + * Converts `value` to a number. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to process. + * @returns {number} Returns the number. + * @example + * + * _.toNumber(3.2); + * // => 3.2 + * + * _.toNumber(Number.MIN_VALUE); + * // => 5e-324 + * + * _.toNumber(Infinity); + * // => Infinity + * + * _.toNumber('3.2'); + * // => 3.2 + */ + function toNumber(value) { + if (typeof value == 'number') { + return value; + } + if (isSymbol(value)) { + return NAN; + } + if (isObject(value)) { + var other = typeof value.valueOf == 'function' ? value.valueOf() : value; + value = isObject(other) ? (other + '') : other; + } + if (typeof value != 'string') { + return value === 0 ? 
value : +value; + } + value = value.replace(reTrim, ''); + var isBinary = reIsBinary.test(value); + return (isBinary || reIsOctal.test(value)) + ? freeParseInt(value.slice(2), isBinary ? 2 : 8) + : (reIsBadHex.test(value) ? NAN : +value); + } + + /** + * Converts `value` to a plain object flattening inherited enumerable string + * keyed properties of `value` to own properties of the plain object. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {Object} Returns the converted plain object. + * @example + * + * function Foo() { + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.assign({ 'a': 1 }, new Foo); + * // => { 'a': 1, 'b': 2 } + * + * _.assign({ 'a': 1 }, _.toPlainObject(new Foo)); + * // => { 'a': 1, 'b': 2, 'c': 3 } + */ + function toPlainObject(value) { + return copyObject(value, keysIn(value)); + } + + /** + * Converts `value` to a safe integer. A safe integer can be compared and + * represented correctly. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {number} Returns the converted integer. + * @example + * + * _.toSafeInteger(3.2); + * // => 3 + * + * _.toSafeInteger(Number.MIN_VALUE); + * // => 0 + * + * _.toSafeInteger(Infinity); + * // => 9007199254740991 + * + * _.toSafeInteger('3.2'); + * // => 3 + */ + function toSafeInteger(value) { + return value + ? baseClamp(toInteger(value), -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER) + : (value === 0 ? value : 0); + } + + /** + * Converts `value` to a string. An empty string is returned for `null` + * and `undefined` values. The sign of `-0` is preserved. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {string} Returns the converted string. 
+ * @example + * + * _.toString(null); + * // => '' + * + * _.toString(-0); + * // => '-0' + * + * _.toString([1, 2, 3]); + * // => '1,2,3' + */ + function toString(value) { + return value == null ? '' : baseToString(value); + } + + /*------------------------------------------------------------------------*/ + + /** + * Assigns own enumerable string keyed properties of source objects to the + * destination object. Source objects are applied from left to right. + * Subsequent sources overwrite property assignments of previous sources. + * + * **Note:** This method mutates `object` and is loosely based on + * [`Object.assign`](https://mdn.io/Object/assign). + * + * @static + * @memberOf _ + * @since 0.10.0 + * @category Object + * @param {Object} object The destination object. + * @param {...Object} [sources] The source objects. + * @returns {Object} Returns `object`. + * @see _.assignIn + * @example + * + * function Foo() { + * this.a = 1; + * } + * + * function Bar() { + * this.c = 3; + * } + * + * Foo.prototype.b = 2; + * Bar.prototype.d = 4; + * + * _.assign({ 'a': 0 }, new Foo, new Bar); + * // => { 'a': 1, 'c': 3 } + */ + var assign = createAssigner(function(object, source) { + if (isPrototype(source) || isArrayLike(source)) { + copyObject(source, keys(source), object); + return; + } + for (var key in source) { + if (hasOwnProperty.call(source, key)) { + assignValue(object, key, source[key]); + } + } + }); + + /** + * This method is like `_.assign` except that it iterates over own and + * inherited source properties. + * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @alias extend + * @category Object + * @param {Object} object The destination object. + * @param {...Object} [sources] The source objects. + * @returns {Object} Returns `object`. 
+ * @see _.assign + * @example + * + * function Foo() { + * this.a = 1; + * } + * + * function Bar() { + * this.c = 3; + * } + * + * Foo.prototype.b = 2; + * Bar.prototype.d = 4; + * + * _.assignIn({ 'a': 0 }, new Foo, new Bar); + * // => { 'a': 1, 'b': 2, 'c': 3, 'd': 4 } + */ + var assignIn = createAssigner(function(object, source) { + copyObject(source, keysIn(source), object); + }); + + /** + * This method is like `_.assignIn` except that it accepts `customizer` + * which is invoked to produce the assigned values. If `customizer` returns + * `undefined`, assignment is handled by the method instead. The `customizer` + * is invoked with five arguments: (objValue, srcValue, key, object, source). + * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @alias extendWith + * @category Object + * @param {Object} object The destination object. + * @param {...Object} sources The source objects. + * @param {Function} [customizer] The function to customize assigned values. + * @returns {Object} Returns `object`. + * @see _.assignWith + * @example + * + * function customizer(objValue, srcValue) { + * return _.isUndefined(objValue) ? srcValue : objValue; + * } + * + * var defaults = _.partialRight(_.assignInWith, customizer); + * + * defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); + * // => { 'a': 1, 'b': 2 } + */ + var assignInWith = createAssigner(function(object, source, srcIndex, customizer) { + copyObject(source, keysIn(source), object, customizer); + }); + + /** + * This method is like `_.assign` except that it accepts `customizer` + * which is invoked to produce the assigned values. If `customizer` returns + * `undefined`, assignment is handled by the method instead. The `customizer` + * is invoked with five arguments: (objValue, srcValue, key, object, source). + * + * **Note:** This method mutates `object`. 
+ * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The destination object. + * @param {...Object} sources The source objects. + * @param {Function} [customizer] The function to customize assigned values. + * @returns {Object} Returns `object`. + * @see _.assignInWith + * @example + * + * function customizer(objValue, srcValue) { + * return _.isUndefined(objValue) ? srcValue : objValue; + * } + * + * var defaults = _.partialRight(_.assignWith, customizer); + * + * defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); + * // => { 'a': 1, 'b': 2 } + */ + var assignWith = createAssigner(function(object, source, srcIndex, customizer) { + copyObject(source, keys(source), object, customizer); + }); + + /** + * Creates an array of values corresponding to `paths` of `object`. + * + * @static + * @memberOf _ + * @since 1.0.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {...(string|string[])} [paths] The property paths to pick. + * @returns {Array} Returns the picked values. + * @example + * + * var object = { 'a': [{ 'b': { 'c': 3 } }, 4] }; + * + * _.at(object, ['a[0].b.c', 'a[1]']); + * // => [3, 4] + */ + var at = flatRest(baseAt); + + /** + * Creates an object that inherits from the `prototype` object. If a + * `properties` object is given, its own enumerable string keyed properties + * are assigned to the created object. + * + * @static + * @memberOf _ + * @since 2.3.0 + * @category Object + * @param {Object} prototype The object to inherit from. + * @param {Object} [properties] The properties to assign to the object. + * @returns {Object} Returns the new object. 
+ * @example + * + * function Shape() { + * this.x = 0; + * this.y = 0; + * } + * + * function Circle() { + * Shape.call(this); + * } + * + * Circle.prototype = _.create(Shape.prototype, { + * 'constructor': Circle + * }); + * + * var circle = new Circle; + * circle instanceof Circle; + * // => true + * + * circle instanceof Shape; + * // => true + */ + function create(prototype, properties) { + var result = baseCreate(prototype); + return properties == null ? result : baseAssign(result, properties); + } + + /** + * Assigns own and inherited enumerable string keyed properties of source + * objects to the destination object for all destination properties that + * resolve to `undefined`. Source objects are applied from left to right. + * Once a property is set, additional values of the same property are ignored. + * + * **Note:** This method mutates `object`. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The destination object. + * @param {...Object} [sources] The source objects. + * @returns {Object} Returns `object`. + * @see _.defaultsDeep + * @example + * + * _.defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); + * // => { 'a': 1, 'b': 2 } + */ + var defaults = baseRest(function(object, sources) { + object = Object(object); + + var index = -1; + var length = sources.length; + var guard = length > 2 ? sources[2] : undefined; + + if (guard && isIterateeCall(sources[0], sources[1], guard)) { + length = 1; + } + + while (++index < length) { + var source = sources[index]; + var props = keysIn(source); + var propsIndex = -1; + var propsLength = props.length; + + while (++propsIndex < propsLength) { + var key = props[propsIndex]; + var value = object[key]; + + if (value === undefined || + (eq(value, objectProto[key]) && !hasOwnProperty.call(object, key))) { + object[key] = source[key]; + } + } + } + + return object; + }); + + /** + * This method is like `_.defaults` except that it recursively assigns + * default properties. 
+ * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 3.10.0 + * @category Object + * @param {Object} object The destination object. + * @param {...Object} [sources] The source objects. + * @returns {Object} Returns `object`. + * @see _.defaults + * @example + * + * _.defaultsDeep({ 'a': { 'b': 2 } }, { 'a': { 'b': 1, 'c': 3 } }); + * // => { 'a': { 'b': 2, 'c': 3 } } + */ + var defaultsDeep = baseRest(function(args) { + args.push(undefined, customDefaultsMerge); + return apply(mergeWith, undefined, args); + }); + + /** + * This method is like `_.find` except that it returns the key of the first + * element `predicate` returns truthy for instead of the element itself. + * + * @static + * @memberOf _ + * @since 1.1.0 + * @category Object + * @param {Object} object The object to inspect. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {string|undefined} Returns the key of the matched element, + * else `undefined`. + * @example + * + * var users = { + * 'barney': { 'age': 36, 'active': true }, + * 'fred': { 'age': 40, 'active': false }, + * 'pebbles': { 'age': 1, 'active': true } + * }; + * + * _.findKey(users, function(o) { return o.age < 40; }); + * // => 'barney' (iteration order is not guaranteed) + * + * // The `_.matches` iteratee shorthand. + * _.findKey(users, { 'age': 1, 'active': true }); + * // => 'pebbles' + * + * // The `_.matchesProperty` iteratee shorthand. + * _.findKey(users, ['active', false]); + * // => 'fred' + * + * // The `_.property` iteratee shorthand. + * _.findKey(users, 'active'); + * // => 'barney' + */ + function findKey(object, predicate) { + return baseFindKey(object, getIteratee(predicate, 3), baseForOwn); + } + + /** + * This method is like `_.findKey` except that it iterates over elements of + * a collection in the opposite order. + * + * @static + * @memberOf _ + * @since 2.0.0 + * @category Object + * @param {Object} object The object to inspect. 
+ * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {string|undefined} Returns the key of the matched element, + * else `undefined`. + * @example + * + * var users = { + * 'barney': { 'age': 36, 'active': true }, + * 'fred': { 'age': 40, 'active': false }, + * 'pebbles': { 'age': 1, 'active': true } + * }; + * + * _.findLastKey(users, function(o) { return o.age < 40; }); + * // => returns 'pebbles' assuming `_.findKey` returns 'barney' + * + * // The `_.matches` iteratee shorthand. + * _.findLastKey(users, { 'age': 36, 'active': true }); + * // => 'barney' + * + * // The `_.matchesProperty` iteratee shorthand. + * _.findLastKey(users, ['active', false]); + * // => 'fred' + * + * // The `_.property` iteratee shorthand. + * _.findLastKey(users, 'active'); + * // => 'pebbles' + */ + function findLastKey(object, predicate) { + return baseFindKey(object, getIteratee(predicate, 3), baseForOwnRight); + } + + /** + * Iterates over own and inherited enumerable string keyed properties of an + * object and invokes `iteratee` for each property. The iteratee is invoked + * with three arguments: (value, key, object). Iteratee functions may exit + * iteration early by explicitly returning `false`. + * + * @static + * @memberOf _ + * @since 0.3.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Object} Returns `object`. + * @see _.forInRight + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.forIn(new Foo, function(value, key) { + * console.log(key); + * }); + * // => Logs 'a', 'b', then 'c' (iteration order is not guaranteed). + */ + function forIn(object, iteratee) { + return object == null + ? 
object + : baseFor(object, getIteratee(iteratee, 3), keysIn); + } + + /** + * This method is like `_.forIn` except that it iterates over properties of + * `object` in the opposite order. + * + * @static + * @memberOf _ + * @since 2.0.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Object} Returns `object`. + * @see _.forIn + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.forInRight(new Foo, function(value, key) { + * console.log(key); + * }); + * // => Logs 'c', 'b', then 'a' assuming `_.forIn` logs 'a', 'b', then 'c'. + */ + function forInRight(object, iteratee) { + return object == null + ? object + : baseForRight(object, getIteratee(iteratee, 3), keysIn); + } + + /** + * Iterates over own enumerable string keyed properties of an object and + * invokes `iteratee` for each property. The iteratee is invoked with three + * arguments: (value, key, object). Iteratee functions may exit iteration + * early by explicitly returning `false`. + * + * @static + * @memberOf _ + * @since 0.3.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Object} Returns `object`. + * @see _.forOwnRight + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.forOwn(new Foo, function(value, key) { + * console.log(key); + * }); + * // => Logs 'a' then 'b' (iteration order is not guaranteed). + */ + function forOwn(object, iteratee) { + return object && baseForOwn(object, getIteratee(iteratee, 3)); + } + + /** + * This method is like `_.forOwn` except that it iterates over properties of + * `object` in the opposite order. 
+ * + * @static + * @memberOf _ + * @since 2.0.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Object} Returns `object`. + * @see _.forOwn + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.forOwnRight(new Foo, function(value, key) { + * console.log(key); + * }); + * // => Logs 'b' then 'a' assuming `_.forOwn` logs 'a' then 'b'. + */ + function forOwnRight(object, iteratee) { + return object && baseForOwnRight(object, getIteratee(iteratee, 3)); + } + + /** + * Creates an array of function property names from own enumerable properties + * of `object`. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The object to inspect. + * @returns {Array} Returns the function names. + * @see _.functionsIn + * @example + * + * function Foo() { + * this.a = _.constant('a'); + * this.b = _.constant('b'); + * } + * + * Foo.prototype.c = _.constant('c'); + * + * _.functions(new Foo); + * // => ['a', 'b'] + */ + function functions(object) { + return object == null ? [] : baseFunctions(object, keys(object)); + } + + /** + * Creates an array of function property names from own and inherited + * enumerable properties of `object`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The object to inspect. + * @returns {Array} Returns the function names. + * @see _.functions + * @example + * + * function Foo() { + * this.a = _.constant('a'); + * this.b = _.constant('b'); + * } + * + * Foo.prototype.c = _.constant('c'); + * + * _.functionsIn(new Foo); + * // => ['a', 'b', 'c'] + */ + function functionsIn(object) { + return object == null ? [] : baseFunctions(object, keysIn(object)); + } + + /** + * Gets the value at `path` of `object`. 
If the resolved value is + * `undefined`, the `defaultValue` is returned in its place. + * + * @static + * @memberOf _ + * @since 3.7.0 + * @category Object + * @param {Object} object The object to query. + * @param {Array|string} path The path of the property to get. + * @param {*} [defaultValue] The value returned for `undefined` resolved values. + * @returns {*} Returns the resolved value. + * @example + * + * var object = { 'a': [{ 'b': { 'c': 3 } }] }; + * + * _.get(object, 'a[0].b.c'); + * // => 3 + * + * _.get(object, ['a', '0', 'b', 'c']); + * // => 3 + * + * _.get(object, 'a.b.c', 'default'); + * // => 'default' + */ + function get(object, path, defaultValue) { + var result = object == null ? undefined : baseGet(object, path); + return result === undefined ? defaultValue : result; + } + + /** + * Checks if `path` is a direct property of `object`. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The object to query. + * @param {Array|string} path The path to check. + * @returns {boolean} Returns `true` if `path` exists, else `false`. + * @example + * + * var object = { 'a': { 'b': 2 } }; + * var other = _.create({ 'a': _.create({ 'b': 2 }) }); + * + * _.has(object, 'a'); + * // => true + * + * _.has(object, 'a.b'); + * // => true + * + * _.has(object, ['a', 'b']); + * // => true + * + * _.has(other, 'a'); + * // => false + */ + function has(object, path) { + return object != null && hasPath(object, path, baseHas); + } + + /** + * Checks if `path` is a direct or inherited property of `object`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The object to query. + * @param {Array|string} path The path to check. + * @returns {boolean} Returns `true` if `path` exists, else `false`. 
+ * @example + * + * var object = _.create({ 'a': _.create({ 'b': 2 }) }); + * + * _.hasIn(object, 'a'); + * // => true + * + * _.hasIn(object, 'a.b'); + * // => true + * + * _.hasIn(object, ['a', 'b']); + * // => true + * + * _.hasIn(object, 'b'); + * // => false + */ + function hasIn(object, path) { + return object != null && hasPath(object, path, baseHasIn); + } + + /** + * Creates an object composed of the inverted keys and values of `object`. + * If `object` contains duplicate values, subsequent values overwrite + * property assignments of previous values. + * + * @static + * @memberOf _ + * @since 0.7.0 + * @category Object + * @param {Object} object The object to invert. + * @returns {Object} Returns the new inverted object. + * @example + * + * var object = { 'a': 1, 'b': 2, 'c': 1 }; + * + * _.invert(object); + * // => { '1': 'c', '2': 'b' } + */ + var invert = createInverter(function(result, value, key) { + if (value != null && + typeof value.toString != 'function') { + value = nativeObjectToString.call(value); + } + + result[value] = key; + }, constant(identity)); + + /** + * This method is like `_.invert` except that the inverted object is generated + * from the results of running each element of `object` thru `iteratee`. The + * corresponding inverted value of each inverted key is an array of keys + * responsible for generating the inverted value. The iteratee is invoked + * with one argument: (value). + * + * @static + * @memberOf _ + * @since 4.1.0 + * @category Object + * @param {Object} object The object to invert. + * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {Object} Returns the new inverted object. 
+ * @example + * + * var object = { 'a': 1, 'b': 2, 'c': 1 }; + * + * _.invertBy(object); + * // => { '1': ['a', 'c'], '2': ['b'] } + * + * _.invertBy(object, function(value) { + * return 'group' + value; + * }); + * // => { 'group1': ['a', 'c'], 'group2': ['b'] } + */ + var invertBy = createInverter(function(result, value, key) { + if (value != null && + typeof value.toString != 'function') { + value = nativeObjectToString.call(value); + } + + if (hasOwnProperty.call(result, value)) { + result[value].push(key); + } else { + result[value] = [key]; + } + }, getIteratee); + + /** + * Invokes the method at `path` of `object`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The object to query. + * @param {Array|string} path The path of the method to invoke. + * @param {...*} [args] The arguments to invoke the method with. + * @returns {*} Returns the result of the invoked method. + * @example + * + * var object = { 'a': [{ 'b': { 'c': [1, 2, 3, 4] } }] }; + * + * _.invoke(object, 'a[0].b.c.slice', 1, 3); + * // => [2, 3] + */ + var invoke = baseRest(baseInvoke); + + /** + * Creates an array of the own enumerable property names of `object`. + * + * **Note:** Non-object values are coerced to objects. See the + * [ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys) + * for more details. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names. + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.keys(new Foo); + * // => ['a', 'b'] (iteration order is not guaranteed) + * + * _.keys('hi'); + * // => ['0', '1'] + */ + function keys(object) { + return isArrayLike(object) ? arrayLikeKeys(object) : baseKeys(object); + } + + /** + * Creates an array of the own and inherited enumerable property names of `object`. 
+ * + * **Note:** Non-object values are coerced to objects. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Object + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names. + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.keysIn(new Foo); + * // => ['a', 'b', 'c'] (iteration order is not guaranteed) + */ + function keysIn(object) { + return isArrayLike(object) ? arrayLikeKeys(object, true) : baseKeysIn(object); + } + + /** + * The opposite of `_.mapValues`; this method creates an object with the + * same values as `object` and keys generated by running each own enumerable + * string keyed property of `object` thru `iteratee`. The iteratee is invoked + * with three arguments: (value, key, object). + * + * @static + * @memberOf _ + * @since 3.8.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Object} Returns the new mapped object. + * @see _.mapValues + * @example + * + * _.mapKeys({ 'a': 1, 'b': 2 }, function(value, key) { + * return key + value; + * }); + * // => { 'a1': 1, 'b2': 2 } + */ + function mapKeys(object, iteratee) { + var result = {}; + iteratee = getIteratee(iteratee, 3); + + baseForOwn(object, function(value, key, object) { + baseAssignValue(result, iteratee(value, key, object), value); + }); + return result; + } + + /** + * Creates an object with the same keys as `object` and values generated + * by running each own enumerable string keyed property of `object` thru + * `iteratee`. The iteratee is invoked with three arguments: + * (value, key, object). + * + * @static + * @memberOf _ + * @since 2.4.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Object} Returns the new mapped object. 
+ * @see _.mapKeys + * @example + * + * var users = { + * 'fred': { 'user': 'fred', 'age': 40 }, + * 'pebbles': { 'user': 'pebbles', 'age': 1 } + * }; + * + * _.mapValues(users, function(o) { return o.age; }); + * // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed) + * + * // The `_.property` iteratee shorthand. + * _.mapValues(users, 'age'); + * // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed) + */ + function mapValues(object, iteratee) { + var result = {}; + iteratee = getIteratee(iteratee, 3); + + baseForOwn(object, function(value, key, object) { + baseAssignValue(result, key, iteratee(value, key, object)); + }); + return result; + } + + /** + * This method is like `_.assign` except that it recursively merges own and + * inherited enumerable string keyed properties of source objects into the + * destination object. Source properties that resolve to `undefined` are + * skipped if a destination value exists. Array and plain object properties + * are merged recursively. Other objects and value types are overridden by + * assignment. Source objects are applied from left to right. Subsequent + * sources overwrite property assignments of previous sources. + * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 0.5.0 + * @category Object + * @param {Object} object The destination object. + * @param {...Object} [sources] The source objects. + * @returns {Object} Returns `object`. + * @example + * + * var object = { + * 'a': [{ 'b': 2 }, { 'd': 4 }] + * }; + * + * var other = { + * 'a': [{ 'c': 3 }, { 'e': 5 }] + * }; + * + * _.merge(object, other); + * // => { 'a': [{ 'b': 2, 'c': 3 }, { 'd': 4, 'e': 5 }] } + */ + var merge = createAssigner(function(object, source, srcIndex) { + baseMerge(object, source, srcIndex); + }); + + /** + * This method is like `_.merge` except that it accepts `customizer` which + * is invoked to produce the merged values of the destination and source + * properties. 
If `customizer` returns `undefined`, merging is handled by the + * method instead. The `customizer` is invoked with six arguments: + * (objValue, srcValue, key, object, source, stack). + * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The destination object. + * @param {...Object} sources The source objects. + * @param {Function} customizer The function to customize assigned values. + * @returns {Object} Returns `object`. + * @example + * + * function customizer(objValue, srcValue) { + * if (_.isArray(objValue)) { + * return objValue.concat(srcValue); + * } + * } + * + * var object = { 'a': [1], 'b': [2] }; + * var other = { 'a': [3], 'b': [4] }; + * + * _.mergeWith(object, other, customizer); + * // => { 'a': [1, 3], 'b': [2, 4] } + */ + var mergeWith = createAssigner(function(object, source, srcIndex, customizer) { + baseMerge(object, source, srcIndex, customizer); + }); + + /** + * The opposite of `_.pick`; this method creates an object composed of the + * own and inherited enumerable property paths of `object` that are not omitted. + * + * **Note:** This method is considerably slower than `_.pick`. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The source object. + * @param {...(string|string[])} [paths] The property paths to omit. + * @returns {Object} Returns the new object. 
+ * @example + * + * var object = { 'a': 1, 'b': '2', 'c': 3 }; + * + * _.omit(object, ['a', 'c']); + * // => { 'b': '2' } + */ + var omit = flatRest(function(object, paths) { + var result = {}; + if (object == null) { + return result; + } + var isDeep = false; + paths = arrayMap(paths, function(path) { + path = castPath(path, object); + isDeep || (isDeep = path.length > 1); + return path; + }); + copyObject(object, getAllKeysIn(object), result); + if (isDeep) { + result = baseClone(result, CLONE_DEEP_FLAG | CLONE_FLAT_FLAG | CLONE_SYMBOLS_FLAG, customOmitClone); + } + var length = paths.length; + while (length--) { + baseUnset(result, paths[length]); + } + return result; + }); + + /** + * The opposite of `_.pickBy`; this method creates an object composed of + * the own and inherited enumerable string keyed properties of `object` that + * `predicate` doesn't return truthy for. The predicate is invoked with two + * arguments: (value, key). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The source object. + * @param {Function} [predicate=_.identity] The function invoked per property. + * @returns {Object} Returns the new object. + * @example + * + * var object = { 'a': 1, 'b': '2', 'c': 3 }; + * + * _.omitBy(object, _.isNumber); + * // => { 'b': '2' } + */ + function omitBy(object, predicate) { + return pickBy(object, negate(getIteratee(predicate))); + } + + /** + * Creates an object composed of the picked `object` properties. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The source object. + * @param {...(string|string[])} [paths] The property paths to pick. + * @returns {Object} Returns the new object. + * @example + * + * var object = { 'a': 1, 'b': '2', 'c': 3 }; + * + * _.pick(object, ['a', 'c']); + * // => { 'a': 1, 'c': 3 } + */ + var pick = flatRest(function(object, paths) { + return object == null ? 
{} : basePick(object, paths); + }); + + /** + * Creates an object composed of the `object` properties `predicate` returns + * truthy for. The predicate is invoked with two arguments: (value, key). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The source object. + * @param {Function} [predicate=_.identity] The function invoked per property. + * @returns {Object} Returns the new object. + * @example + * + * var object = { 'a': 1, 'b': '2', 'c': 3 }; + * + * _.pickBy(object, _.isNumber); + * // => { 'a': 1, 'c': 3 } + */ + function pickBy(object, predicate) { + if (object == null) { + return {}; + } + var props = arrayMap(getAllKeysIn(object), function(prop) { + return [prop]; + }); + predicate = getIteratee(predicate); + return basePickBy(object, props, function(value, path) { + return predicate(value, path[0]); + }); + } + + /** + * This method is like `_.get` except that if the resolved value is a + * function it's invoked with the `this` binding of its parent object and + * its result is returned. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The object to query. + * @param {Array|string} path The path of the property to resolve. + * @param {*} [defaultValue] The value returned for `undefined` resolved values. + * @returns {*} Returns the resolved value. + * @example + * + * var object = { 'a': [{ 'b': { 'c1': 3, 'c2': _.constant(4) } }] }; + * + * _.result(object, 'a[0].b.c1'); + * // => 3 + * + * _.result(object, 'a[0].b.c2'); + * // => 4 + * + * _.result(object, 'a[0].b.c3', 'default'); + * // => 'default' + * + * _.result(object, 'a[0].b.c3', _.constant('default')); + * // => 'default' + */ + function result(object, path, defaultValue) { + path = castPath(path, object); + + var index = -1, + length = path.length; + + // Ensure the loop is entered when path is empty. 
+ if (!length) { + length = 1; + object = undefined; + } + while (++index < length) { + var value = object == null ? undefined : object[toKey(path[index])]; + if (value === undefined) { + index = length; + value = defaultValue; + } + object = isFunction(value) ? value.call(object) : value; + } + return object; + } + + /** + * Sets the value at `path` of `object`. If a portion of `path` doesn't exist, + * it's created. Arrays are created for missing index properties while objects + * are created for all other missing properties. Use `_.setWith` to customize + * `path` creation. + * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 3.7.0 + * @category Object + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {*} value The value to set. + * @returns {Object} Returns `object`. + * @example + * + * var object = { 'a': [{ 'b': { 'c': 3 } }] }; + * + * _.set(object, 'a[0].b.c', 4); + * console.log(object.a[0].b.c); + * // => 4 + * + * _.set(object, ['x', '0', 'y', 'z'], 5); + * console.log(object.x[0].y.z); + * // => 5 + */ + function set(object, path, value) { + return object == null ? object : baseSet(object, path, value); + } + + /** + * This method is like `_.set` except that it accepts `customizer` which is + * invoked to produce the objects of `path`. If `customizer` returns `undefined` + * path creation is handled by the method instead. The `customizer` is invoked + * with three arguments: (nsValue, key, nsObject). + * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {*} value The value to set. + * @param {Function} [customizer] The function to customize assigned values. + * @returns {Object} Returns `object`. 
+ * @example + * + * var object = {}; + * + * _.setWith(object, '[0][1]', 'a', Object); + * // => { '0': { '1': 'a' } } + */ + function setWith(object, path, value, customizer) { + customizer = typeof customizer == 'function' ? customizer : undefined; + return object == null ? object : baseSet(object, path, value, customizer); + } + + /** + * Creates an array of own enumerable string keyed-value pairs for `object` + * which can be consumed by `_.fromPairs`. If `object` is a map or set, its + * entries are returned. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @alias entries + * @category Object + * @param {Object} object The object to query. + * @returns {Array} Returns the key-value pairs. + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.toPairs(new Foo); + * // => [['a', 1], ['b', 2]] (iteration order is not guaranteed) + */ + var toPairs = createToPairs(keys); + + /** + * Creates an array of own and inherited enumerable string keyed-value pairs + * for `object` which can be consumed by `_.fromPairs`. If `object` is a map + * or set, its entries are returned. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @alias entriesIn + * @category Object + * @param {Object} object The object to query. + * @returns {Array} Returns the key-value pairs. + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.toPairsIn(new Foo); + * // => [['a', 1], ['b', 2], ['c', 3]] (iteration order is not guaranteed) + */ + var toPairsIn = createToPairs(keysIn); + + /** + * An alternative to `_.reduce`; this method transforms `object` to a new + * `accumulator` object which is the result of running each of its own + * enumerable string keyed properties thru `iteratee`, with each invocation + * potentially mutating the `accumulator` object. If `accumulator` is not + * provided, a new object with the same `[[Prototype]]` will be used. 
The + * iteratee is invoked with four arguments: (accumulator, value, key, object). + * Iteratee functions may exit iteration early by explicitly returning `false`. + * + * @static + * @memberOf _ + * @since 1.3.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @param {*} [accumulator] The custom accumulator value. + * @returns {*} Returns the accumulated value. + * @example + * + * _.transform([2, 3, 4], function(result, n) { + * result.push(n *= n); + * return n % 2 == 0; + * }, []); + * // => [4, 9] + * + * _.transform({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) { + * (result[value] || (result[value] = [])).push(key); + * }, {}); + * // => { '1': ['a', 'c'], '2': ['b'] } + */ + function transform(object, iteratee, accumulator) { + var isArr = isArray(object), + isArrLike = isArr || isBuffer(object) || isTypedArray(object); + + iteratee = getIteratee(iteratee, 4); + if (accumulator == null) { + var Ctor = object && object.constructor; + if (isArrLike) { + accumulator = isArr ? new Ctor : []; + } + else if (isObject(object)) { + accumulator = isFunction(Ctor) ? baseCreate(getPrototype(object)) : {}; + } + else { + accumulator = {}; + } + } + (isArrLike ? arrayEach : baseForOwn)(object, function(value, index, object) { + return iteratee(accumulator, value, index, object); + }); + return accumulator; + } + + /** + * Removes the property at `path` of `object`. + * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to unset. + * @returns {boolean} Returns `true` if the property is deleted, else `false`. 
+ * @example + * + * var object = { 'a': [{ 'b': { 'c': 7 } }] }; + * _.unset(object, 'a[0].b.c'); + * // => true + * + * console.log(object); + * // => { 'a': [{ 'b': {} }] }; + * + * _.unset(object, ['a', '0', 'b', 'c']); + * // => true + * + * console.log(object); + * // => { 'a': [{ 'b': {} }] }; + */ + function unset(object, path) { + return object == null ? true : baseUnset(object, path); + } + + /** + * This method is like `_.set` except that accepts `updater` to produce the + * value to set. Use `_.updateWith` to customize `path` creation. The `updater` + * is invoked with one argument: (value). + * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 4.6.0 + * @category Object + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {Function} updater The function to produce the updated value. + * @returns {Object} Returns `object`. + * @example + * + * var object = { 'a': [{ 'b': { 'c': 3 } }] }; + * + * _.update(object, 'a[0].b.c', function(n) { return n * n; }); + * console.log(object.a[0].b.c); + * // => 9 + * + * _.update(object, 'x[0].y.z', function(n) { return n ? n + 1 : 0; }); + * console.log(object.x[0].y.z); + * // => 0 + */ + function update(object, path, updater) { + return object == null ? object : baseUpdate(object, path, castFunction(updater)); + } + + /** + * This method is like `_.update` except that it accepts `customizer` which is + * invoked to produce the objects of `path`. If `customizer` returns `undefined` + * path creation is handled by the method instead. The `customizer` is invoked + * with three arguments: (nsValue, key, nsObject). + * + * **Note:** This method mutates `object`. + * + * @static + * @memberOf _ + * @since 4.6.0 + * @category Object + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. 
+ * @param {Function} updater The function to produce the updated value. + * @param {Function} [customizer] The function to customize assigned values. + * @returns {Object} Returns `object`. + * @example + * + * var object = {}; + * + * _.updateWith(object, '[0][1]', _.constant('a'), Object); + * // => { '0': { '1': 'a' } } + */ + function updateWith(object, path, updater, customizer) { + customizer = typeof customizer == 'function' ? customizer : undefined; + return object == null ? object : baseUpdate(object, path, castFunction(updater), customizer); + } + + /** + * Creates an array of the own enumerable string keyed property values of `object`. + * + * **Note:** Non-object values are coerced to objects. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property values. + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.values(new Foo); + * // => [1, 2] (iteration order is not guaranteed) + * + * _.values('hi'); + * // => ['h', 'i'] + */ + function values(object) { + return object == null ? [] : baseValues(object, keys(object)); + } + + /** + * Creates an array of the own and inherited enumerable string keyed property + * values of `object`. + * + * **Note:** Non-object values are coerced to objects. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Object + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property values. + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.valuesIn(new Foo); + * // => [1, 2, 3] (iteration order is not guaranteed) + */ + function valuesIn(object) { + return object == null ? 
[] : baseValues(object, keysIn(object)); + } + + /*------------------------------------------------------------------------*/ + + /** + * Clamps `number` within the inclusive `lower` and `upper` bounds. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Number + * @param {number} number The number to clamp. + * @param {number} [lower] The lower bound. + * @param {number} upper The upper bound. + * @returns {number} Returns the clamped number. + * @example + * + * _.clamp(-10, -5, 5); + * // => -5 + * + * _.clamp(10, -5, 5); + * // => 5 + */ + function clamp(number, lower, upper) { + if (upper === undefined) { + upper = lower; + lower = undefined; + } + if (upper !== undefined) { + upper = toNumber(upper); + upper = upper === upper ? upper : 0; + } + if (lower !== undefined) { + lower = toNumber(lower); + lower = lower === lower ? lower : 0; + } + return baseClamp(toNumber(number), lower, upper); + } + + /** + * Checks if `n` is between `start` and up to, but not including, `end`. If + * `end` is not specified, it's set to `start` with `start` then set to `0`. + * If `start` is greater than `end` the params are swapped to support + * negative ranges. + * + * @static + * @memberOf _ + * @since 3.3.0 + * @category Number + * @param {number} number The number to check. + * @param {number} [start=0] The start of the range. + * @param {number} end The end of the range. + * @returns {boolean} Returns `true` if `number` is in the range, else `false`. 
+ * @see _.range, _.rangeRight + * @example + * + * _.inRange(3, 2, 4); + * // => true + * + * _.inRange(4, 8); + * // => true + * + * _.inRange(4, 2); + * // => false + * + * _.inRange(2, 2); + * // => false + * + * _.inRange(1.2, 2); + * // => true + * + * _.inRange(5.2, 4); + * // => false + * + * _.inRange(-3, -2, -6); + * // => true + */ + function inRange(number, start, end) { + start = toFinite(start); + if (end === undefined) { + end = start; + start = 0; + } else { + end = toFinite(end); + } + number = toNumber(number); + return baseInRange(number, start, end); + } + + /** + * Produces a random number between the inclusive `lower` and `upper` bounds. + * If only one argument is provided a number between `0` and the given number + * is returned. If `floating` is `true`, or either `lower` or `upper` are + * floats, a floating-point number is returned instead of an integer. + * + * **Note:** JavaScript follows the IEEE-754 standard for resolving + * floating-point values which can produce unexpected results. + * + * @static + * @memberOf _ + * @since 0.7.0 + * @category Number + * @param {number} [lower=0] The lower bound. + * @param {number} [upper=1] The upper bound. + * @param {boolean} [floating] Specify returning a floating-point number. + * @returns {number} Returns the random number. 
+ * @example + * + * _.random(0, 5); + * // => an integer between 0 and 5 + * + * _.random(5); + * // => also an integer between 0 and 5 + * + * _.random(5, true); + * // => a floating-point number between 0 and 5 + * + * _.random(1.2, 5.2); + * // => a floating-point number between 1.2 and 5.2 + */ + function random(lower, upper, floating) { + if (floating && typeof floating != 'boolean' && isIterateeCall(lower, upper, floating)) { + upper = floating = undefined; + } + if (floating === undefined) { + if (typeof upper == 'boolean') { + floating = upper; + upper = undefined; + } + else if (typeof lower == 'boolean') { + floating = lower; + lower = undefined; + } + } + if (lower === undefined && upper === undefined) { + lower = 0; + upper = 1; + } + else { + lower = toFinite(lower); + if (upper === undefined) { + upper = lower; + lower = 0; + } else { + upper = toFinite(upper); + } + } + if (lower > upper) { + var temp = lower; + lower = upper; + upper = temp; + } + if (floating || lower % 1 || upper % 1) { + var rand = nativeRandom(); + return nativeMin(lower + (rand * (upper - lower + freeParseFloat('1e-' + ((rand + '').length - 1)))), upper); + } + return baseRandom(lower, upper); + } + + /*------------------------------------------------------------------------*/ + + /** + * Converts `string` to [camel case](https://en.wikipedia.org/wiki/CamelCase). + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category String + * @param {string} [string=''] The string to convert. + * @returns {string} Returns the camel cased string. + * @example + * + * _.camelCase('Foo Bar'); + * // => 'fooBar' + * + * _.camelCase('--foo-bar--'); + * // => 'fooBar' + * + * _.camelCase('__FOO_BAR__'); + * // => 'fooBar' + */ + var camelCase = createCompounder(function(result, word, index) { + word = word.toLowerCase(); + return result + (index ? capitalize(word) : word); + }); + + /** + * Converts the first character of `string` to upper case and the remaining + * to lower case. 
+ *
+ * @static
+ * @memberOf _
+ * @since 3.0.0
+ * @category String
+ * @param {string} [string=''] The string to capitalize.
+ * @returns {string} Returns the capitalized string.
+ * @example
+ *
+ * _.capitalize('FRED');
+ * // => 'Fred'
+ */
+ function capitalize(string) {
+ return upperFirst(toString(string).toLowerCase());
+ }
+
+ /**
+ * Deburrs `string` by converting
+ * [Latin-1 Supplement](https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block)#Character_table)
+ * and [Latin Extended-A](https://en.wikipedia.org/wiki/Latin_Extended-A)
+ * letters to basic Latin letters and removing
+ * [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks).
+ *
+ * @static
+ * @memberOf _
+ * @since 3.0.0
+ * @category String
+ * @param {string} [string=''] The string to deburr.
+ * @returns {string} Returns the deburred string.
+ * @example
+ *
+ * _.deburr('déjà vu');
+ * // => 'deja vu'
+ */
+ function deburr(string) {
+ string = toString(string);
+ return string && string.replace(reLatin, deburrLetter).replace(reComboMark, '');
+ }
+
+ /**
+ * Checks if `string` ends with the given target string.
+ *
+ * @static
+ * @memberOf _
+ * @since 3.0.0
+ * @category String
+ * @param {string} [string=''] The string to inspect.
+ * @param {string} [target] The string to search for.
+ * @param {number} [position=string.length] The position to search up to.
+ * @returns {boolean} Returns `true` if `string` ends with `target`,
+ * else `false`.
+ * @example
+ *
+ * _.endsWith('abc', 'c');
+ * // => true
+ *
+ * _.endsWith('abc', 'b');
+ * // => false
+ *
+ * _.endsWith('abc', 'b', 2);
+ * // => true
+ */
+ function endsWith(string, target, position) {
+ string = toString(string);
+ target = baseToString(target);
+
+ var length = string.length;
+ position = position === undefined
+ ? 
length
+ : baseClamp(toInteger(position), 0, length);
+
+ var end = position;
+ position -= target.length;
+ return position >= 0 && string.slice(position, end) == target;
+ }
+
+ /**
+ * Converts the characters "&", "<", ">", '"', and "'" in `string` to their
+ * corresponding HTML entities.
+ *
+ * **Note:** No other characters are escaped. To escape additional
+ * characters use a third-party library like [_he_](https://mths.be/he).
+ *
+ * Though the ">" character is escaped for symmetry, characters like
+ * ">" and "/" don't need escaping in HTML and have no special meaning
+ * unless they're part of a tag or unquoted attribute value. See
+ * [Mathias Bynens's article](https://mathiasbynens.be/notes/ambiguous-ampersands)
+ * (under "semi-related fun fact") for more details.
+ *
+ * When working with HTML you should always
+ * [quote attribute values](http://wonko.com/post/html-escaping) to reduce
+ * XSS vectors.
+ *
+ * @static
+ * @since 0.1.0
+ * @memberOf _
+ * @category String
+ * @param {string} [string=''] The string to escape.
+ * @returns {string} Returns the escaped string.
+ * @example
+ *
+ * _.escape('fred, barney, & pebbles');
+ * // => 'fred, barney, &amp; pebbles'
+ */
+ function escape(string) {
+ string = toString(string);
+ return (string && reHasUnescapedHtml.test(string))
+ ? string.replace(reUnescapedHtml, escapeHtmlChar)
+ : string;
+ }
+
+ /**
+ * Escapes the `RegExp` special characters "^", "$", "\", ".", "*", "+",
+ * "?", "(", ")", "[", "]", "{", "}", and "|" in `string`.
+ *
+ * @static
+ * @memberOf _
+ * @since 3.0.0
+ * @category String
+ * @param {string} [string=''] The string to escape.
+ * @returns {string} Returns the escaped string.
+ * @example
+ *
+ * _.escapeRegExp('[lodash](https://lodash.com/)');
+ * // => '\[lodash\]\(https://lodash\.com/\)'
+ */
+ function escapeRegExp(string) {
+ string = toString(string);
+ return (string && reHasRegExpChar.test(string))
+ ? 
string.replace(reRegExpChar, '\\$&') + : string; + } + + /** + * Converts `string` to + * [kebab case](https://en.wikipedia.org/wiki/Letter_case#Special_case_styles). + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category String + * @param {string} [string=''] The string to convert. + * @returns {string} Returns the kebab cased string. + * @example + * + * _.kebabCase('Foo Bar'); + * // => 'foo-bar' + * + * _.kebabCase('fooBar'); + * // => 'foo-bar' + * + * _.kebabCase('__FOO_BAR__'); + * // => 'foo-bar' + */ + var kebabCase = createCompounder(function(result, word, index) { + return result + (index ? '-' : '') + word.toLowerCase(); + }); + + /** + * Converts `string`, as space separated words, to lower case. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category String + * @param {string} [string=''] The string to convert. + * @returns {string} Returns the lower cased string. + * @example + * + * _.lowerCase('--Foo-Bar--'); + * // => 'foo bar' + * + * _.lowerCase('fooBar'); + * // => 'foo bar' + * + * _.lowerCase('__FOO_BAR__'); + * // => 'foo bar' + */ + var lowerCase = createCompounder(function(result, word, index) { + return result + (index ? ' ' : '') + word.toLowerCase(); + }); + + /** + * Converts the first character of `string` to lower case. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category String + * @param {string} [string=''] The string to convert. + * @returns {string} Returns the converted string. + * @example + * + * _.lowerFirst('Fred'); + * // => 'fred' + * + * _.lowerFirst('FRED'); + * // => 'fRED' + */ + var lowerFirst = createCaseFirst('toLowerCase'); + + /** + * Pads `string` on the left and right sides if it's shorter than `length`. + * Padding characters are truncated if they can't be evenly divided by `length`. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category String + * @param {string} [string=''] The string to pad. + * @param {number} [length=0] The padding length. 
+ * @param {string} [chars=' '] The string used as padding. + * @returns {string} Returns the padded string. + * @example + * + * _.pad('abc', 8); + * // => ' abc ' + * + * _.pad('abc', 8, '_-'); + * // => '_-abc_-_' + * + * _.pad('abc', 3); + * // => 'abc' + */ + function pad(string, length, chars) { + string = toString(string); + length = toInteger(length); + + var strLength = length ? stringSize(string) : 0; + if (!length || strLength >= length) { + return string; + } + var mid = (length - strLength) / 2; + return ( + createPadding(nativeFloor(mid), chars) + + string + + createPadding(nativeCeil(mid), chars) + ); + } + + /** + * Pads `string` on the right side if it's shorter than `length`. Padding + * characters are truncated if they exceed `length`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category String + * @param {string} [string=''] The string to pad. + * @param {number} [length=0] The padding length. + * @param {string} [chars=' '] The string used as padding. + * @returns {string} Returns the padded string. + * @example + * + * _.padEnd('abc', 6); + * // => 'abc ' + * + * _.padEnd('abc', 6, '_-'); + * // => 'abc_-_' + * + * _.padEnd('abc', 3); + * // => 'abc' + */ + function padEnd(string, length, chars) { + string = toString(string); + length = toInteger(length); + + var strLength = length ? stringSize(string) : 0; + return (length && strLength < length) + ? (string + createPadding(length - strLength, chars)) + : string; + } + + /** + * Pads `string` on the left side if it's shorter than `length`. Padding + * characters are truncated if they exceed `length`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category String + * @param {string} [string=''] The string to pad. + * @param {number} [length=0] The padding length. + * @param {string} [chars=' '] The string used as padding. + * @returns {string} Returns the padded string. 
+ * @example + * + * _.padStart('abc', 6); + * // => ' abc' + * + * _.padStart('abc', 6, '_-'); + * // => '_-_abc' + * + * _.padStart('abc', 3); + * // => 'abc' + */ + function padStart(string, length, chars) { + string = toString(string); + length = toInteger(length); + + var strLength = length ? stringSize(string) : 0; + return (length && strLength < length) + ? (createPadding(length - strLength, chars) + string) + : string; + } + + /** + * Converts `string` to an integer of the specified radix. If `radix` is + * `undefined` or `0`, a `radix` of `10` is used unless `value` is a + * hexadecimal, in which case a `radix` of `16` is used. + * + * **Note:** This method aligns with the + * [ES5 implementation](https://es5.github.io/#x15.1.2.2) of `parseInt`. + * + * @static + * @memberOf _ + * @since 1.1.0 + * @category String + * @param {string} string The string to convert. + * @param {number} [radix=10] The radix to interpret `value` by. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {number} Returns the converted integer. + * @example + * + * _.parseInt('08'); + * // => 8 + * + * _.map(['6', '08', '10'], _.parseInt); + * // => [6, 8, 10] + */ + function parseInt(string, radix, guard) { + if (guard || radix == null) { + radix = 0; + } else if (radix) { + radix = +radix; + } + return nativeParseInt(toString(string).replace(reTrimStart, ''), radix || 0); + } + + /** + * Repeats the given string `n` times. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category String + * @param {string} [string=''] The string to repeat. + * @param {number} [n=1] The number of times to repeat the string. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {string} Returns the repeated string. + * @example + * + * _.repeat('*', 3); + * // => '***' + * + * _.repeat('abc', 2); + * // => 'abcabc' + * + * _.repeat('abc', 0); + * // => '' + */ + function repeat(string, n, guard) { + if ((guard ? 
isIterateeCall(string, n, guard) : n === undefined)) { + n = 1; + } else { + n = toInteger(n); + } + return baseRepeat(toString(string), n); + } + + /** + * Replaces matches for `pattern` in `string` with `replacement`. + * + * **Note:** This method is based on + * [`String#replace`](https://mdn.io/String/replace). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category String + * @param {string} [string=''] The string to modify. + * @param {RegExp|string} pattern The pattern to replace. + * @param {Function|string} replacement The match replacement. + * @returns {string} Returns the modified string. + * @example + * + * _.replace('Hi Fred', 'Fred', 'Barney'); + * // => 'Hi Barney' + */ + function replace() { + var args = arguments, + string = toString(args[0]); + + return args.length < 3 ? string : string.replace(args[1], args[2]); + } + + /** + * Converts `string` to + * [snake case](https://en.wikipedia.org/wiki/Snake_case). + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category String + * @param {string} [string=''] The string to convert. + * @returns {string} Returns the snake cased string. + * @example + * + * _.snakeCase('Foo Bar'); + * // => 'foo_bar' + * + * _.snakeCase('fooBar'); + * // => 'foo_bar' + * + * _.snakeCase('--FOO-BAR--'); + * // => 'foo_bar' + */ + var snakeCase = createCompounder(function(result, word, index) { + return result + (index ? '_' : '') + word.toLowerCase(); + }); + + /** + * Splits `string` by `separator`. + * + * **Note:** This method is based on + * [`String#split`](https://mdn.io/String/split). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category String + * @param {string} [string=''] The string to split. + * @param {RegExp|string} separator The separator pattern to split by. + * @param {number} [limit] The length to truncate results to. + * @returns {Array} Returns the string segments. 
+ * @example + * + * _.split('a-b-c', '-', 2); + * // => ['a', 'b'] + */ + function split(string, separator, limit) { + if (limit && typeof limit != 'number' && isIterateeCall(string, separator, limit)) { + separator = limit = undefined; + } + limit = limit === undefined ? MAX_ARRAY_LENGTH : limit >>> 0; + if (!limit) { + return []; + } + string = toString(string); + if (string && ( + typeof separator == 'string' || + (separator != null && !isRegExp(separator)) + )) { + separator = baseToString(separator); + if (!separator && hasUnicode(string)) { + return castSlice(stringToArray(string), 0, limit); + } + } + return string.split(separator, limit); + } + + /** + * Converts `string` to + * [start case](https://en.wikipedia.org/wiki/Letter_case#Stylistic_or_specialised_usage). + * + * @static + * @memberOf _ + * @since 3.1.0 + * @category String + * @param {string} [string=''] The string to convert. + * @returns {string} Returns the start cased string. + * @example + * + * _.startCase('--foo-bar--'); + * // => 'Foo Bar' + * + * _.startCase('fooBar'); + * // => 'Foo Bar' + * + * _.startCase('__FOO_BAR__'); + * // => 'FOO BAR' + */ + var startCase = createCompounder(function(result, word, index) { + return result + (index ? ' ' : '') + upperFirst(word); + }); + + /** + * Checks if `string` starts with the given target string. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category String + * @param {string} [string=''] The string to inspect. + * @param {string} [target] The string to search for. + * @param {number} [position=0] The position to search from. + * @returns {boolean} Returns `true` if `string` starts with `target`, + * else `false`. + * @example + * + * _.startsWith('abc', 'a'); + * // => true + * + * _.startsWith('abc', 'b'); + * // => false + * + * _.startsWith('abc', 'b', 1); + * // => true + */ + function startsWith(string, target, position) { + string = toString(string); + position = position == null + ? 
0 + : baseClamp(toInteger(position), 0, string.length); + + target = baseToString(target); + return string.slice(position, position + target.length) == target; + } + + /** + * Creates a compiled template function that can interpolate data properties + * in "interpolate" delimiters, HTML-escape interpolated data properties in + * "escape" delimiters, and execute JavaScript in "evaluate" delimiters. Data + * properties may be accessed as free variables in the template. If a setting + * object is given, it takes precedence over `_.templateSettings` values. + * + * **Note:** In the development build `_.template` utilizes + * [sourceURLs](http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/#toc-sourceurl) + * for easier debugging. + * + * For more information on precompiling templates see + * [lodash's custom builds documentation](https://lodash.com/custom-builds). + * + * For more information on Chrome extension sandboxes see + * [Chrome's extensions documentation](https://developer.chrome.com/extensions/sandboxingEval). + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category String + * @param {string} [string=''] The template string. + * @param {Object} [options={}] The options object. + * @param {RegExp} [options.escape=_.templateSettings.escape] + * The HTML "escape" delimiter. + * @param {RegExp} [options.evaluate=_.templateSettings.evaluate] + * The "evaluate" delimiter. + * @param {Object} [options.imports=_.templateSettings.imports] + * An object to import into the template as free variables. + * @param {RegExp} [options.interpolate=_.templateSettings.interpolate] + * The "interpolate" delimiter. + * @param {string} [options.sourceURL='lodash.templateSources[n]'] + * The sourceURL of the compiled template. + * @param {string} [options.variable='obj'] + * The data object variable name. + * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. + * @returns {Function} Returns the compiled template function. 
+ * @example + * + * // Use the "interpolate" delimiter to create a compiled template. + * var compiled = _.template('hello <%= user %>!'); + * compiled({ 'user': 'fred' }); + * // => 'hello fred!' + * + * // Use the HTML "escape" delimiter to escape data property values. + * var compiled = _.template('<%- value %>'); + * compiled({ 'value': '